From 7d0f512d139413c3784a65bbf40e712b5afef35a Mon Sep 17 00:00:00 2001 From: "roberto.cirillo" Date: Tue, 2 Oct 2018 10:05:43 +0000 Subject: [PATCH] merge from trunk git-svn-id: http://svn.research-infrastructures.eu/public/d4science/gcube/branches/content-management/storage-manager-core/2.9@171824 82a268e6-3cf1-43bd-a215-b396298e98cf --- .classpath | 36 + .project | 23 + .settings/org.eclipse.core.resources.prefs | 7 + .settings/org.eclipse.jdt.core.prefs | 12 + .settings/org.eclipse.m2e.core.prefs | 5 + distro/CHANGELOG | 25 + distro/LICENSE | 2 + distro/README | Bin 0 -> 1515 bytes distro/changelog.xml | 84 ++ distro/descriptor.xml | 32 + distro/profile.xml | 25 + pom.xml | 109 ++ src/main/java/log4j.properties | 15 + .../blobstorage/coding/IEncode.java | 48 + .../blobstorage/report/Report.java | 36 + .../report/ReportAccountingImpl.java | 28 + .../blobstorage/report/ReportConfig.java | 8 + .../blobstorage/report/ReportException.java | 42 + .../blobstorage/report/ReportFactory.java | 42 + .../blobstorage/resource/AccessType.java | 14 + .../blobstorage/resource/MemoryType.java | 12 + .../blobstorage/resource/MyFile.java | 694 ++++++++++ .../resource/OperationDefinition.java | 140 ++ .../blobstorage/resource/StorageObject.java | 76 ++ .../blobstorage/service/IClient.java | 262 ++++ .../directoryOperation/BucketCoding.java | 188 +++ .../directoryOperation/DirectoryBucket.java | 237 ++++ .../directoryOperation/DirectoryEntity.java | 80 ++ .../service/directoryOperation/Encrypter.java | 129 ++ .../service/impl/AmbiguousResource.java | 33 + .../service/impl/LocalResource.java | 139 ++ .../service/impl/RemoteResource.java | 156 +++ .../service/impl/RemoteResourceBoolean.java | 77 ++ .../impl/RemoteResourceComplexInfo.java | 35 + .../impl/RemoteResourceDestination.java | 45 + .../impl/RemoteResourceFolderInfo.java | 84 ++ .../service/impl/RemoteResourceInfo.java | 53 + .../service/impl/RemoteResourceSource.java | 39 + .../blobstorage/service/impl/Resource.java | 138 ++ .../service/impl/ServiceEngine.java | 1165 +++++++++++++++++ .../service/operation/ChunkConsumer.java | 146 +++ .../service/operation/ChunkOptimization.java | 49 + .../service/operation/ChunkProducer.java | 127 ++ .../blobstorage/service/operation/Close.java | 49 + .../blobstorage/service/operation/Copy.java | 100 ++ .../service/operation/CopyDir.java | 100 ++ .../service/operation/Download.java | 111 ++ .../service/operation/DownloadAndLock.java | 67 + .../service/operation/DuplicateFile.java | 93 ++ .../blobstorage/service/operation/Exist.java | 72 + .../service/operation/FileWriter.java | 99 ++ .../service/operation/GetFolderCount.java | 64 + .../operation/GetFolderLastUpdate.java | 52 + .../service/operation/GetFolderSize.java | 65 + .../service/operation/GetHttpUrl.java | 103 ++ .../service/operation/GetHttpsUrl.java | 103 ++ .../service/operation/GetMetaFile.java | 86 ++ .../service/operation/GetMetaInfo.java | 62 + .../service/operation/GetRemotePath.java | 70 + .../service/operation/GetSize.java | 68 + .../blobstorage/service/operation/GetTTL.java | 69 + .../blobstorage/service/operation/GetUrl.java | 76 ++ .../service/operation/GetUserTotalItems.java | 72 + .../service/operation/GetUserTotalVolume.java | 71 + .../blobstorage/service/operation/Link.java | 95 ++ .../blobstorage/service/operation/Lock.java | 124 ++ .../service/operation/Monitor.java | 88 ++ .../blobstorage/service/operation/Move.java | 96 ++ .../service/operation/MoveDir.java | 95 ++ .../service/operation/Operation.java | 385 ++++++ 
.../service/operation/OperationFactory.java | 114 ++ .../service/operation/OperationManager.java | 148 +++ .../blobstorage/service/operation/Remove.java | 88 ++ .../service/operation/RenewTTL.java | 64 + .../service/operation/SetMetaInfo.java | 61 + .../service/operation/SoftCopy.java | 130 ++ .../blobstorage/service/operation/Unlock.java | 129 ++ .../blobstorage/service/operation/Upload.java | 165 +++ .../service/operation/UploadAndUnlock.java | 62 + .../blobstorage/test/SimpleTest2.java | 28 + .../transport/TransportManager.java | 357 +++++ .../transport/TransportManagerFactory.java | 76 ++ .../transport/backend/BsonOperator.java | 49 + .../transport/backend/CollectionOperator.java | 50 + .../backend/HttpTerrastoreClient.java | 87 ++ .../transport/backend/MongoIOManager.java | 1152 ++++++++++++++++ .../backend/MongoOperationManager.java | 687 ++++++++++ .../backend/RemoteBackendException.java | 28 + .../backend/operation/CopyDirOperator.java | 103 ++ .../backend/operation/CopyOperator.java | 129 ++ .../backend/operation/DownloadOperator.java | 75 ++ .../backend/operation/DuplicateOperator.java | 74 ++ .../backend/operation/LinkOperator.java | 131 ++ .../backend/operation/LockOperator.java | 86 ++ .../backend/operation/MoveDirOperator.java | 110 ++ .../backend/operation/MoveOperator.java | 201 +++ .../backend/operation/SoftCopyOperator.java | 318 +++++ .../backend/operation/UnlockOperator.java | 101 ++ .../backend/operation/UploadOperator.java | 112 ++ .../transport/backend/util/Costants.java | 88 ++ .../transport/backend/util/DateUtils.java | 26 + .../transport/backend/util/GetMD5.java | 51 + .../transport/backend/util/GetPayloadMap.java | 25 + .../backend/util/MongoInputStream.java | 103 ++ .../backend/util/MongoOutputStream.java | 82 ++ .../transport/backend/util/Utils.java | 48 + 106 files changed, 12570 insertions(+) create mode 100644 .classpath create mode 100644 .project create mode 100644 .settings/org.eclipse.core.resources.prefs create mode 100644 .settings/org.eclipse.jdt.core.prefs create mode 100644 .settings/org.eclipse.m2e.core.prefs create mode 100644 distro/CHANGELOG create mode 100644 distro/LICENSE create mode 100644 distro/README create mode 100644 distro/changelog.xml create mode 100644 distro/descriptor.xml create mode 100644 distro/profile.xml create mode 100644 pom.xml create mode 100644 src/main/java/log4j.properties create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/coding/IEncode.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/report/Report.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportAccountingImpl.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportConfig.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportException.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportFactory.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/resource/AccessType.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/resource/MemoryType.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/resource/MyFile.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/resource/OperationDefinition.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/resource/StorageObject.java create mode 100644 
src/main/java/org/gcube/contentmanagement/blobstorage/service/IClient.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/BucketCoding.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/DirectoryBucket.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/DirectoryEntity.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/Encrypter.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/AmbiguousResource.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/LocalResource.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResource.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceBoolean.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceComplexInfo.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceDestination.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceFolderInfo.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceInfo.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceSource.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/Resource.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/ServiceEngine.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkConsumer.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkOptimization.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkProducer.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Close.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Copy.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/CopyDir.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Download.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DownloadAndLock.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DuplicateFile.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Exist.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/FileWriter.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderCount.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderLastUpdate.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderSize.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpUrl.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpsUrl.java create mode 100644 
src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaFile.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaInfo.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetRemotePath.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetSize.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetTTL.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUrl.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalItems.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalVolume.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Link.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Lock.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Monitor.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Move.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/MoveDir.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Operation.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationFactory.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationManager.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Remove.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/RenewTTL.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SetMetaInfo.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SoftCopy.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Unlock.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Upload.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/UploadAndUnlock.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/test/SimpleTest2.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManager.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManagerFactory.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/BsonOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/CollectionOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/HttpTerrastoreClient.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoIOManager.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoOperationManager.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/RemoteBackendException.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyDirOperator.java 
create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DownloadOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DuplicateOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LinkOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LockOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveDirOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/SoftCopyOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UnlockOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UploadOperator.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Costants.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/DateUtils.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetMD5.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetPayloadMap.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoInputStream.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoOutputStream.java create mode 100644 src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Utils.java diff --git a/.classpath b/.classpath new file mode 100644 index 0000000..e43402f --- /dev/null +++ b/.classpath @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.project b/.project new file mode 100644 index 0000000..cd1394b --- /dev/null +++ b/.project @@ -0,0 +1,23 @@ + + + storage-manager-core + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.jdt.core.javanature + org.eclipse.m2e.core.maven2Nature + + diff --git a/.settings/org.eclipse.core.resources.prefs b/.settings/org.eclipse.core.resources.prefs new file mode 100644 index 0000000..2ecc58a --- /dev/null +++ b/.settings/org.eclipse.core.resources.prefs @@ -0,0 +1,7 @@ +#Wed May 02 15:06:23 CEST 2012 +eclipse.preferences.version=1 +encoding//src/main/java=UTF-8 +encoding//src/main/resources=UTF-8 +encoding//src/test/java=UTF-8 +encoding//src/test/resources=UTF-8 +encoding/=UTF-8 diff --git a/.settings/org.eclipse.jdt.core.prefs b/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 0000000..6249222 --- /dev/null +++ b/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,12 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7 +org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve +org.eclipse.jdt.core.compiler.compliance=1.7 +org.eclipse.jdt.core.compiler.debug.lineNumber=generate +org.eclipse.jdt.core.compiler.debug.localVariable=generate 
+org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.source=1.7 diff --git a/.settings/org.eclipse.m2e.core.prefs b/.settings/org.eclipse.m2e.core.prefs new file mode 100644 index 0000000..b711f31 --- /dev/null +++ b/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,5 @@ +#Wed May 02 14:48:43 CEST 2012 +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/distro/CHANGELOG b/distro/CHANGELOG new file mode 100644 index 0000000..8d2e578 --- /dev/null +++ b/distro/CHANGELOG @@ -0,0 +1,25 @@ +v 2.4.0 (21-04-2015) + * upgrade mongo-java-driver to version 3.0.3 +v 2.2.0 (21-04-2015) + * change smp protocol + * id as remote identifier for RFile methods +v 2.1.2 (31-10-2014) + * added methods for folder and user storage calculation +v 2.1.0 (01-04-2014) + * added metadata for every remote file in agreement with storage-manager-trigger module +v 2.0.3 (24-10-2013) + * added mongoOption for reinforce nwtwork failover #245 openBio +v 2.0.2 (19-06-2013) + * bug fix input stream with replace flag #208 openBio +v 2.0.1 (29-05-2013) + * bug fix incident: connection pending #606 +v 2.0.0 (19-05-2013) + * added move, copy and link operations + *FWS integration +v 1.0.2 (15-01-2013) + * Integration with new gCube release 2.12.0 version system +v 1.0.1 (11-10-2012) + * added VOLATILE area +v. 1.0.0 (04-05-2012) + * First release + diff --git a/distro/LICENSE b/distro/LICENSE new file mode 100644 index 0000000..7bca8c8 --- /dev/null +++ b/distro/LICENSE @@ -0,0 +1,2 @@ +${gcube.license} + diff --git a/distro/README b/distro/README new file mode 100644 index 0000000000000000000000000000000000000000..01364909a470c33cee0e4adb91913ee14a7750fc GIT binary patch literal 1515 zcmbtUL2p_y3`Ux?1HZzmIJDDvyH`7`1rA6pngF$jX@`)Qht<4E` zhl6>UoFP}{zYoYk9M8?Y$?ki&?ClnbtQo3^`zrzQ-J%Kz0TH55UXn$qX&SiDE9FYG zzAS!M_5#Ss4+>z2QAf|uu=LzGG3Fr7M+`;7?Iy*Q=Wrh0PcZch@ zOudi$FF%y2L~CUtDb8AzS&y(n#-H`rKzDeO?Ys`0#yIK0+YzUL{4(IzI2zo@WUXI! 
zt3{I5YtIqRBY2xi5tcI$+Axg25{CF!K8$sA5l=JNht5z4NmcW z`<}&gM;cv_@8=<%*IOm{W!x%m#=$o61et6=sZg!!h+GAD_6aH_i-&|)3;TFu-HivM c$!zq(t|~g651mfutK5%r+cO>A4DUyO0N{(WCjbBd literal 0 HcmV?d00001 diff --git a/distro/changelog.xml b/distro/changelog.xml new file mode 100644 index 0000000..c4791f4 --- /dev/null +++ b/distro/changelog.xml @@ -0,0 +1,84 @@ + + + SSL enabled + + + fix throw RemoteBackendException in Resource class + + + add softcopy operation + upgrade to mongo-java-driver 3.6.0 + + + add duplicate file operation + + + fix: bug #5598 + + + Added method for retrieving the relative remote path by id + + + fix: bug #5625 + added suffix to url for object stored on VolatileArea + + + fix: http url with encode resolution + fix-pom: downgrade commons-io to version 1.4 + change: mongo authentication mechanism from CR to negotiation + + + clean code + added getHttpUrl method + + + code refactoring: new class MongoIO + move operation alternative version: using fs.files collection + add mimeType manager + add retry mechanism when retrieving remote objects + upgrade mongo-java-driver library to version 3.0.0 + + + enable smp uri without payload + + + change smp protocol + id as remote identifier for RFile methods + + + added methods for folder and user storage calculation + + + removed dependency from slf4j-noop + + + added metadata for every remote file in agreement with storage-manager-trigger module + + + Integration with new gCube release 2.17.0 version system + added operations on folder + + + Integration with new gCube release 2.17.0 version system + added mongoOption for reinforce nwtwork failover #245 openBio + + + Integration with new gCube release 2.15.0 version system + bugfix incident on pending connections #606 + + + Integration with new gCube release 2.14.0 version system + FWS integration + added move, copy and link operations + + + Integration with new gCube release 2.12.0 version system + + + Added VOLATILE area + + + First Release + + + diff --git a/distro/descriptor.xml b/distro/descriptor.xml new file mode 100644 index 0000000..e395580 --- /dev/null +++ b/distro/descriptor.xml @@ -0,0 +1,32 @@ + + servicearchive + + tar.gz + + / + + + ${distroDirectory} + / + true + + README + LICENSE + changelog.xml + profile.xml + + 755 + true + + + + + target/${build.finalName}.${project.packaging} + /${artifactId} + + + + \ No newline at end of file diff --git a/distro/profile.xml b/distro/profile.xml new file mode 100644 index 0000000..83f8c3d --- /dev/null +++ b/distro/profile.xml @@ -0,0 +1,25 @@ + + + + Service + + ${description} + ContentManagement + ${artifactId} + 1.0.0 + + + ${artifactId} + ${version} + + ${groupId} + ${artifactId} + ${version} + + + ${build.finalName}.jar + + + + + diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..d31a98f --- /dev/null +++ b/pom.xml @@ -0,0 +1,109 @@ + + 4.0.0 + + maven-parent + org.gcube.tools + 1.0.0 + + + org.gcube.contentmanagement + storage-manager-core + 2.9.0-SNAPSHOT + + ${project.basedir}/distro + + + scm:svn:http://svn.d4science.research-infrastructures.eu/gcube/trunk/content-management/${project.artifactId} + scm:svn:https://svn.d4science.research-infrastructures.eu/gcube/trunk/content-management/${project.artifactId} + http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/content-management/${project.artifactId} + + + + + + org.gcube.distribution + gcube-bom + LATEST + pom + import + + + + + + org.slf4j + slf4j-api + + + + org.mongodb + mongo-java-driver + 3.6.0 + + + + org.gcube.core + common-encryption + + + + 
commons-io + commons-io + 1.4 + + + commons-codec + commons-codec + 1.8 + + + + + + org.apache.maven.plugins + maven-resources-plugin + 2.5 + + + copy-profile + install + + copy-resources + + + target + + + ${distroDirectory} + true + + profile.xml + + + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + + ${distroDirectory}/descriptor.xml + + + + + servicearchive + install + + single + + + + + + + \ No newline at end of file diff --git a/src/main/java/log4j.properties b/src/main/java/log4j.properties new file mode 100644 index 0000000..043a187 --- /dev/null +++ b/src/main/java/log4j.properties @@ -0,0 +1,15 @@ +log4j.rootLogger=INFO, A1, stdout +log4j.appender.A1=org.apache.log4j.RollingFileAppender +log4j.appender.A1.File=log.txt +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n +# ***** Max file size is set to 100KB +log4j.appender.A1.MaxFileSize=100MB +# ***** Keep one backup file +log4j.appender.A1.MaxBackupIndex=1 + +#CONSOLE +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Threshold=INFO +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%t] %-5p %c %d{dd MMM yyyy ;HH:mm:ss.SSS} - %m%n diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/coding/IEncode.java b/src/main/java/org/gcube/contentmanagement/blobstorage/coding/IEncode.java new file mode 100644 index 0000000..e700be7 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/coding/IEncode.java @@ -0,0 +1,48 @@ +package org.gcube.contentmanagement.blobstorage.coding; + +import java.io.InputStream; +import java.util.List; + +/** + * Interface for coding a generic File in bytes, or base64 code Used for terrastore system + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public interface IEncode { + /** + * Encode a generic file in byte array + * @param path file's path + * @param isChunk indicates if the file is chuncked + * @param isBase64 the base64 coding option + * @return a byte array + */ + public byte[] encodeGenericFile(String path, boolean isChunk, boolean isBase64); + + /** + * Reads a file storing intermediate data into a list. Fast method. + * @param path + * @param isChunk + * @param chunkDimension + * @return the list that contains the file + */ + public List encodeFileChunked(String path, boolean isChunk, int chunkDimension); + + /** + * Reads a file storing intermediate data into an array. 
+ * @param in + * @param path + * @param isChunk + * @param chunkDimension + * @return the byte array that contains the file + */ + public byte[] encodeFileChunked2(InputStream in, String path, boolean isChunk, long chunkDimension); + + /** + * Decode a byte array in a File + * @param encode + * @param path + * @param isBase64 + */ + public void decodeByte2File(byte[] encode, String path, boolean isBase64); + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/report/Report.java b/src/main/java/org/gcube/contentmanagement/blobstorage/report/Report.java new file mode 100644 index 0000000..9d004de --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/report/Report.java @@ -0,0 +1,36 @@ +package org.gcube.contentmanagement.blobstorage.report; +/** + * Generic interface for accounting report + * @author Roberto Cirillo (ISTI-CNR) + * + */ +@Deprecated +public interface Report { + /** + * Set generic properties of report + * @param resourceType + * @param consumerId + * @param resourceOwner + * @param resourceScope + * @return + */ + public void init(String consumerId, String resourceScope); + /** + * set start time of the operation + * @return + */ + public void timeUpdate(); + + /** + * Set end time of operation and other specific properties + * @return + */ + public void ultimate(String owner, String uri, String operation, String size ); + + /** + * send report + * @return + */ + public void send(); + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportAccountingImpl.java b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportAccountingImpl.java new file mode 100644 index 0000000..99a6209 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportAccountingImpl.java @@ -0,0 +1,28 @@ +package org.gcube.contentmanagement.blobstorage.report; + + /** + * Void implementation of Report interface + * @author Roberto Cirillo (ISTI-CNR) + * + */ + @Deprecated + public class ReportAccountingImpl implements Report { + + @Override + public void init(String consumerId, String resourceScope) { + } + + + @Override + public void timeUpdate() { + } + + @Override + public void ultimate(String owner, String uri, String operation, String size ) { + } + + @Override + public void send() { + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportConfig.java b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportConfig.java new file mode 100644 index 0000000..0fbd57e --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportConfig.java @@ -0,0 +1,8 @@ +package org.gcube.contentmanagement.blobstorage.report; + +@Deprecated +public class ReportConfig { + +// /** Report type - used by : Report factory class */ + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportException.java b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportException.java new file mode 100644 index 0000000..dcf0abb --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportException.java @@ -0,0 +1,42 @@ +package org.gcube.contentmanagement.blobstorage.report; + + +/** + * Report Exception class + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class ReportException extends Exception { + + /** + * + */ + private static final long serialVersionUID = -7852250665598838026L; + private Exception exc = null; + + /** The no-arg constructor */ + public ReportException() { + } + + /** + * 
Construct a ReportException with an error message + * @param message the error message + */ + public ReportException(String message) { + super(message); + } + + public ReportException (Exception e) + { + this.setExc(e); + } + + public Exception getExc() { + return exc; + } + + public void setExc(Exception exc) { + this.exc = exc; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportFactory.java b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportFactory.java new file mode 100644 index 0000000..ed61fe1 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/report/ReportFactory.java @@ -0,0 +1,42 @@ +package org.gcube.contentmanagement.blobstorage.report; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; + +/** + * + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class ReportFactory { + + final static Logger logger=LoggerFactory.getLogger(ReportFactory.class); + /** + *

Instantiates the Report implementation specified by the user

+ * @return the Dao class + * @throws DAOException + */ + public static Report getReport(int ReportType) throws ReportException { + + Report report = null; + + try { + switch(ReportType) { + case Costants.ACCOUNTING_TYPE : + report = new ReportAccountingImpl(); + break; + default : + throw new ReportException("MyDAOFactory.getDAO: ["+ReportType+"] is an UNKNOWN TYPE !"); + } + logger.trace("ReportFactory.getDao : returning class ["+report.getClass().getName()+"]..."); + + } catch (Exception e) { + e.printStackTrace(); + throw new ReportException("ReportFactory.getReport: Exception while getting Report type : \n" + e.getMessage()); + } + + logger.trace("MyReportFactory.getReport : returning class ["+report.getClass().getName()+"]..."); + return report; + } +} \ No newline at end of file diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/resource/AccessType.java b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/AccessType.java new file mode 100644 index 0000000..39a5e87 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/AccessType.java @@ -0,0 +1,14 @@ +package org.gcube.contentmanagement.blobstorage.resource; +/** + * define the kind of access to storage manager + * private: The file uploaded are visibility limited at the owner + * shared: the visibility is limited for all user that have the same serviceClass and serviceName + * public: the visibility is limited to all the infrastructured area + * + * @author Roberto Cirillo (ISTI-CNR) + * + */ + +public enum AccessType { + PUBLIC, SHARED, PRIVATE + } \ No newline at end of file diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MemoryType.java b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MemoryType.java new file mode 100644 index 0000000..ad5fd95 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MemoryType.java @@ -0,0 +1,12 @@ +package org.gcube.contentmanagement.blobstorage.resource; + +/** + * Define the Memory type used for storage backend + * @author Roberto Cirillo (ISTI-CNR) + * + */ + +public enum MemoryType { + PERSISTENT, VOLATILE, BOTH +} + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MyFile.java b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MyFile.java new file mode 100644 index 0000000..0c745a3 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/MyFile.java @@ -0,0 +1,694 @@ +package org.gcube.contentmanagement.blobstorage.resource; + +import java.io.InputStream; +import java.io.OutputStream; + +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Class that define a file entity object. This entity, contains file properties and metadata. 
+ * This type of resource is builded by ServiceEngine class and used by the TransportManager for requests to the remote System + * This class contains also the definition of the current operation: + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class MyFile { + +// file name + private String name; +// owner + private String owner; +// payload Terrastore + private byte[] content; +// local path + private String localPath; +// remote path + private String remotePath; +// absolute remote path + private String absoluteRemotePath; +// num of chunks client side Terrastore + private int numChunks; +// name of the key in the remote bucket Terrastore + private String key; +// local directory + private String localDir; +// inputStream of the resource + private InputStream inputStream; +//outputStream of the resource + private OutputStream outputStream; +// type of stream + private String type; +//if true the file is locked + private boolean lock; +// the key for unlocked the file + private String lockedKey; + + private String remoteDir; + + private long lifeTime; + + private String id; + + private String id2; + + private long size; + + private String extension; + + private String creationTime; + + // parameters for GCube instance Url calculation + + private String serviceName; + private String serviceClass; + private String ownerGcube; + private String gcubeScope; + private AccessType gcubeAccessType; + private MemoryType gcubeMemoryType; + + /** + * define the operation type on the current resource + */ + private OperationDefinition operation; + private String resolverHost; + private boolean forceCreation; + private String mimeType; + private String genericPropertyField; + private String genericPropertyValue; + private String passPhrase; + + private String writeConcern; + private String readPreference; + private String rootPath; + private boolean replace=false; + final Logger logger = LoggerFactory.getLogger(MyFile.class); + + public MyFile(boolean lock){ + setLock(lock); + } + + /** + * set some properties on the current resource + * @param author author name + * @param name name of the file + * @param pathClient local path of the file + */ + public MyFile(String author, String name, String pathClient, MemoryType memoryType){ + this.setOwner(author); + this.setName(name); + this.setLocalPath(pathClient); + setGcubeMemoryType(memoryType); + } + + /** + * set some properties on the current resource + * @param author author name + * @param name name of the file + * @param pathClient local path of the file + * @param pathServer remote path of the file + */ + + public MyFile(String author, String name, String pathClient, String pathServer, MemoryType memoryType){ + this.setOwner(author); + this.setName(name); + this.setLocalPath(pathClient); + this.setRemotePath(pathServer); + setGcubeMemoryType(memoryType); + } + + public MyFile(MemoryType memoryType) { + setGcubeMemoryType(memoryType); + + } + + /** + * build a new object with only the name setted + * @param name file name + */ + public MyFile(String name, MemoryType memoryType){ + setName(name); + setGcubeMemoryType(memoryType); + } + + /** + * get number of chunks if the file is splitted in chunks + * @return number of chunks + */ + public int getNumChunks() { + return numChunks; + } + + /** + * set the number of file chunks. 
default is 1 + * @param numChunks + */ + public void setNumChunks(int numChunks) { + this.numChunks = numChunks; + } + + /** + * get the local path of the resource + * @return local path + */ + public String getLocalPath() { + return localPath; + } + + /** + * set the local path of the resource + * @param path the absolute path of the resource + */ + public void setLocalPath(String path) { + this.localPath = path; + } + + /** + * get the file name + * @return file name + */ + public String getName() { + return name; + } + + /** + * set the file name + * @param name file name + */ + public void setName(String name) { + this.name = name; + } + + /** + * get the file owner + * @return file owner + */ + public String getOwner() { + return owner; + } + + /** + * set the file owner + * @param author file owner + */ + public void setOwner(String author) { + this.owner = author; + } + + /** + * get the file payload or null + * + * @return a byte array that contains the file payload + */ + public byte[] getContent() { + return content; + } + + /** + * set the payload file + * @param currentChunk payload file + */ + public void setContent(byte[] currentChunk) { + this.content = currentChunk; + } + /** + * used only for chunk files. indicates the name of the current chunk + * @return the name of the current chunk + */ + public String getKey() { + return key; + } + + /** + * used only for chunk files. indicates the name of the current chunk + * @param key chunk name + */ + public void setKey(String key) { + this.key = key; + } + + /** + * returns a copy of the current resource + * @return the file copy + */ + public MyFile copyProperties(){ + MyFile dest=new MyFile(getGcubeMemoryType()); + dest.setOwner(getOwner()); + dest.setLocalDir(this.getLocalDir()); + dest.setRemoteDir(this.getRemoteDir()); + dest.setKey(this.key); + dest.setName(this.name); + dest.setNumChunks(this.numChunks); + dest.setLocalPath(this.localPath); + dest.setRemotePath(this.remotePath); + return dest; + } + + /** + * get the remote path of the resource + * @return remote path + */ + public String getRemotePath() { + return remotePath; + } + + /** + * set the remote path of the resource + * @param pathServer remote path + */ + public void setRemotePath(String pathServer) { + this.remotePath = pathServer; + } + + /** + * get the inputStream of the resource + * @return inputStream of the resource + */ + public InputStream getInputStream() { + return inputStream; + } + + /** + * set the inputStream of the resource + * @param inputStream inputStream of the resource + */ + public void setInputStream(InputStream inputStream) { + this.inputStream = inputStream; + } + + /** + * get the outputStream of the resource + * @return outputStream associated to the resource + */ + public OutputStream getOutputStream() { + return outputStream; + } + + /** + * set the outputStream of the resource + * @param outputStream outputstream associated to the resource + */ + public void setOutputStream(OutputStream outputStream) { + this.outputStream = outputStream; + } + + /** + * if the resource will be locked + * @return true if is lock + */ + public boolean isLock() { + return lock; + } + + /** + * set locking on the resource + * @param lock + */ + public void setLock(boolean lock) { + this.lock = lock; + } + + /** + * get the object type of the resource + * @return the class type of the resource + */ + public String getType() { + return type; + } + + /** + * set the object type of the resource + * @param type + */ + public void setType(String type) { + 
this.type = type; + } + + /** + * get the local direcotry where is the file + * @return the local directory + */ + public String getLocalDir() { + return localDir; + } + + /** + * set the local direcotry where is the file or the origin directory in case of move or copy operations + * @param localDir + */ + public void setLocalDir(String localDir) { + this.localDir = localDir; + } + + /** + * get the remote directory where the resource will be stored or the destination directory in case of copy, move operations + * @return the remote directory + */ + public String getRemoteDir() { + return remoteDir; + } + + /** + * set the remote directory where the resource will be stored + * @param remoteDir the remote directory + */ + public void setRemoteDir(String remoteDir) { + this.remoteDir = remoteDir; + } + + /** + * get the lock key or null + * @return the lock key + */ + public String getLockedKey() { + return lockedKey; + } + + /** + * set the lock key + * @param lockedKey lock key + */ + public void setLockedKey(String lockedKey) { + this.lockedKey = lockedKey; + } + + /** + * get the serviceName associated to the resource. This is need for build the remote root path. + * @return the serviceName + */ + public String getServiceName() { + return serviceName; + } + + /** + * set the serviceName associated to the resource. This is need for build the remote root path. + * @param serviceName serviceName associated to the resource + */ + public void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + + /** + * get the serviceClass associated to the resource. This is need for build the remote root path. + * @return service class + */ + public String getServiceClass() { + return serviceClass; + } + + /** + * set the serviceClass associated to the resource. This is need for build the remote root path. 
+ * @param serviceClass serviceClass associated to the resource + */ + public void setServiceClass(String serviceClass) { + this.serviceClass = serviceClass; + } + + /** + * get the file owner + * @return the file owner + */ + public String getOwnerGcube() { + return ownerGcube; + } + + /** + * set the file owner + * @param ownerGcube file owner + */ + public void setOwnerGcube(String ownerGcube) { + this.ownerGcube = ownerGcube; + } + + /** + * get gCube scope, is need for build the remote root path + * @return gcube scope string + */ + public String getGcubeScope() { + return gcubeScope; + } + + /** + * set the gCube scope + * @param gcubeScope gcube scope + */ + public void setGcubeScope(String gcubeScope) { + this.gcubeScope = gcubeScope; + } + + /** + * get the gcube accessType: PRIVATE, SHARED, PUBLIC + * @return gcube access type + * + */ + public AccessType getGcubeAccessType() { + return gcubeAccessType; + } + + /** + * set the gcube accessType: PRIVATE, SHARED, PUBLIC + * @param gcubeAccessType + */ + public void setGcubeAccessType(AccessType gcubeAccessType) { + this.gcubeAccessType = gcubeAccessType; + } + + /** + * get the gcube memoryType: PERSISTENT, VOLATILE + * @return the memory type + */ + public MemoryType getGcubeMemoryType() { + return gcubeMemoryType; + } + + /** + * set the gcube memoryType: PERSISTENT, VOLATILE + * @param gcubeMemoryType + */ + public void setGcubeMemoryType(MemoryType gcubeMemoryType) { + this.gcubeMemoryType = gcubeMemoryType; + } + + /** + * set the kind of operation + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition + * @param operation operation type + */ + public void setOperation(OperationDefinition operation) { + this.operation = operation; + } + + /** + * set the kind of operation + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setOperation(OPERATION) + * @param operation + */ + public void setOperation(OPERATION operation) { + this.operation = new OperationDefinition(operation); + } + + /** + * get the kind of operation + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition + * @return the operation definition on this resource + */ + public OperationDefinition getOperationDefinition(){ + return operation; + } + + /** + * get the local resource identifier + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#getLocalResource() + * @return the local Resource identifier + */ + public LOCAL_RESOURCE getLocalResource() { + return operation.getLocalResource(); + } + + /** + * set the local resource identifier + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setLocalResource(LOCAL_RESOURCE) + * @param localResource local resource identifier + */ + public void setLocalResource(LOCAL_RESOURCE localResource) { + if(operation==null) + operation=new OperationDefinition(OPERATION.VOID); + operation.setLocalResource(localResource); + } + + /** + * get the remote resource identifier + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#getRemoteResource() + * @return the remote Resource identifier + */ + public REMOTE_RESOURCE getRemoteResource() { + return operation.getRemoteResource(); + } + + /** + * set the remote resource identifier + * @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setRemoteResource(REMOTE_RESOURCE) + * @param remoteResource local resource identifier */ + public void setRemoteResource(REMOTE_RESOURCE remoteResource) { + if(operation==null) + 
operation=new OperationDefinition(OPERATION.VOID); + operation.setRemoteResource(remoteResource); + } + + public String getAbsoluteRemotePath() { + return absoluteRemotePath; + } + + public void setAbsoluteRemotePath(String absoluteRemotePath) { + this.absoluteRemotePath = absoluteRemotePath; + } + + public long getLifeTime() { + return lifeTime; + } + + public void setLifeTime(long lifeTime) { + this.lifeTime = lifeTime; + } + + public OperationDefinition getOperation() { + return operation; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + + public String getExtension() { + return extension; + } + + public void setExtension(String extension) { + this.extension = extension; + } + + public String getCreationTime() { + return creationTime; + } + + public void setCreationTime(String creationTime) { + this.creationTime = creationTime; + } + + public void setResolverHost(String resolverHost) { + this.resolverHost=resolverHost; + + } + + public String getResolverHOst() { + return resolverHost; + } + + public void forceCreation(boolean forceCreation) { + this.forceCreation=forceCreation; + + } + + public boolean isForceCreation(){ + return this.forceCreation; + } + + public String getMimeType(){ + return this.mimeType; + } + + public void setMimeType(String mime) { + this.mimeType=mime; + + } + + public String getGenericPropertyField() { + return genericPropertyField; + } + + public void setGenericPropertyField(String genericPropertyField) { + this.genericPropertyField = genericPropertyField; + } + + public String getGenericPropertyValue() { + return genericPropertyValue; + } + + public void setGenericPropertyValue(String genericPropertyValue) { + this.genericPropertyValue = genericPropertyValue; + } + + public String getPassPhrase() { + return passPhrase; + } + + public void setPassPhrase(String passPhrase) { + this.passPhrase = passPhrase; + } + + public String getWriteConcern() { + return writeConcern; + } + + public void setWriteConcern(String writeConcern) { + this.writeConcern = writeConcern; + } + + public String getReadPreference() { + return readPreference; + } + + public void setReadPreference(String readConcern) { + this.readPreference = readConcern; + } + + public void setRootPath(String rootPath) { + this.rootPath=rootPath; + + } + + public String getRootPath(){ + return rootPath; + } + + public void setReplaceOption(boolean replace) { + this.replace=replace; + + } + + public boolean isReplace(){ + return replace; + } + + public void print(){ + logger.info("\n Object: \n\t path: "+this.getRemotePath()+ "\n\t id: "+this.getId()); + } + + public String getId2() { + return id2; + } + + public void setId2(String id2) { + this.id2 = id2; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/resource/OperationDefinition.java b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/OperationDefinition.java new file mode 100644 index 0000000..a5e4cf4 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/OperationDefinition.java @@ -0,0 +1,140 @@ +package org.gcube.contentmanagement.blobstorage.resource; + +/** + * Defines the identity of a remote operation. 
+ * The enumerations OPERATION, LOCAL_RESOURCE and REMOTE_RESOURCE contain all you need to identify the kind of operation. + * ex: + * + * + * if the operation is defined in this way: + * + * + * OPERATION: UPLOAD; + * LOCAL_RESOURCE: PATH; + * REMOTE_RESOURCE: PATH; + * + * + * it means that the client wants to upload a file whose absolute local path is defined in the pathClient field + * to the remote location identified by the pathServer field of the MyFile resource + * @see org.gcube.contentmanagement.blobstorage.resource.MyFile + * + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class OperationDefinition { + + /** + * Indicates the type of the current operation + * + */ + public enum OPERATION {UPLOAD,DOWNLOAD, REMOVE, REMOVE_DIR, SHOW_DIR, GET_URL, UNLOCK, GET_TTL, RENEW_TTL, GET_SIZE, VOID, LOCK, COPY, COPY_DIR, LINK, MOVE, MOVE_DIR, GET_META_FILE, GET_TOTAL_USER_VOLUME, GET_USER_TOTAL_ITEMS, GET_FOLDER_TOTAL_VOLUME, GET_FOLDER_TOTAL_ITEMS, GET_FOLDER_LAST_UPDATE, CLOSE, GET_META_INFO, SET_META_INFO, GET_HTTP_URL, GET_HTTPS_URL, GET_REMOTE_PATH, EXIST, DUPLICATE, SOFT_COPY} + + + /** + * Indicates how the local resource is identified + * + */ + public enum LOCAL_RESOURCE {INPUT_STREAM, OUTPUT_STREAM, PATH, VOID, ID} + + /** + * Indicates how the remote resource is identified + * + */ + public enum REMOTE_RESOURCE {INPUT_STREAM, OUTPUT_STREAM, PATH, VOID, PATH_FOR_INPUT_STREAM, PATH_FOR_OUTPUTSTREAM, ID, DIR} + + /** + * Indicates the type of the current operation + */ + private OPERATION operation; + + /** + * Indicates how the local resource is identified + */ + private LOCAL_RESOURCE localResource; + + /** + * Indicates how the remote resource is identified + */ + private REMOTE_RESOURCE remoteResource; + + + /** + * Set the complete operation definition + * @param op operation type + * @param lr local resource type + * @param rr remote resource type + */ + public OperationDefinition(OPERATION op, LOCAL_RESOURCE lr, REMOTE_RESOURCE rr){ + setOperation(op); + setLocalResource(lr); + setRemoteResource(rr); + + } + + /** + * Set the operation definition without specifying the local resource and the remote resource + * @param op operation type + */ + public OperationDefinition(OPERATION op){ + setOperation(op); + setLocalResource(LOCAL_RESOURCE.VOID); + setRemoteResource(REMOTE_RESOURCE.VOID); + } + + + /** + * Get the operation type + * @return the operation type + */ + public OPERATION getOperation() { + return operation; + } + + /** + * set the operation type + * @param operation operation type + */ + public void setOperation(OPERATION operation) { + this.operation = operation; + } + + /** + * get the local resource type + * @return the local resource type + */ + public LOCAL_RESOURCE getLocalResource() { + return localResource; + } + + /** + * set the local resource type + * @param localResource local resource type + */ + public void setLocalResource(LOCAL_RESOURCE localResource) { + this.localResource = localResource; + } + + /** + * get the remote resource type + * @return the remote resource type + */ + public REMOTE_RESOURCE getRemoteResource() { + return remoteResource; + } + + /** + * set the remote resource type + * @param remoteResource remote resource type + */ + public void setRemoteResource(REMOTE_RESOURCE remoteResource) { + this.remoteResource = remoteResource; + } + + @Override + public String toString() { + return "OperationDefinition [operation=" + operation + + ", localResource=" + localResource + ", remoteResource=" + + remoteResource + "]"; + } + +} diff --git
a/src/main/java/org/gcube/contentmanagement/blobstorage/resource/StorageObject.java b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/StorageObject.java new file mode 100644 index 0000000..073bd69 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/resource/StorageObject.java @@ -0,0 +1,76 @@ +package org.gcube.contentmanagement.blobstorage.resource; + + +/** + * Class that defines an entity object (a file or a directory). + * This entity contains file properties and methods for client queries. + * This type of resource is built by the TransportManager to answer the client, + * e.g. if the client asks for the contents of a remote folder, a List of StorageObject is returned + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class StorageObject { + + private String type; + private String name; + private String owner; + private String creationTime; + private String id; + + public String getCreationTime() { + return creationTime; + } + + public void setCreationTime(String creationTime) { + this.creationTime = creationTime; + } + + public StorageObject(String name, String type){ + setType(type); + setName(name); + } + + public StorageObject(String name, String type, String owner, String creationTime){ + setType(type); + setName(name); + setOwner(owner); + setCreationTime(creationTime); + } + + public boolean isDirectory() { + return type.equalsIgnoreCase("dir"); + } + + public boolean isFile() { + return type.equalsIgnoreCase("file"); + } + + + private void setType(String type) { + this.type = type; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + this.owner = owner; + } + + public void setId(String id) { + this.id=id; + + } + + public String getId(){ + return this.id; + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/IClient.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/IClient.java new file mode 100644 index 0000000..17fa074 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/IClient.java @@ -0,0 +1,262 @@ +package org.gcube.contentmanagement.blobstorage.service; + + +import org.gcube.contentmanagement.blobstorage.service.impl.AmbiguousResource; +import org.gcube.contentmanagement.blobstorage.service.impl.LocalResource; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResource; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceBoolean; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceComplexInfo; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceFolderInfo; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceInfo; +import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceSource; + + +/** + * Client interface for the storage service.
+ * @author Roberto Cirillo (ISTI - CNR) + * + */ +public interface IClient { +/** + * + * Method for download + */ + public abstract LocalResource get(); + + +/** + * + * Method for locking a remote resource (file) + */ +public abstract AmbiguousResource lock(); +/** + * + * Method for asking the file size + */ +public abstract RemoteResourceInfo getSize(); + + +/** + * Method for upload + * @param replace indicates if the file must be replaced if it is already present in the storage + * @return LocalResource object + */ +public abstract LocalResource put(boolean replace); + + +/** +* Method for upload +* @param replace indicates if the file must be replaced if it is already present in the storage +* @param mimeType the file mime type +* @return LocalResource object +*/ +public abstract LocalResource put(boolean replace, String mimeType); + + +/** + * + * Method for unlocking a remote resource + */ +public abstract AmbiguousResource unlock(String key); + +/** + * TTL query + * @return the TTL left in ms for a remote resource if it is locked + */ +public abstract RemoteResourceInfo getTTL(); + +/** + * + * Remove a remote resource from the storage System + * @return RemoteResource object + */ +public abstract RemoteResource remove(); + +/** + * Show all the objects in a specified remote folder + * @return RemoteResource object + */ +public RemoteResource showDir(); + +/** + * + * Remove a folder from the storage System + * @return RemoteResource object + */ +public RemoteResource removeDir(); + + +/** + * Renew the TTL for a specific resource. This operation is allowed a limited number of times + * @return RemoteResourceInfo object + */ +public RemoteResourceInfo renewTTL(String key); + +/** + * + * @return RemoteResource object + */ +RemoteResource getUrl(); + +/** + * Link a file from a remote resource to another new remote resource. If the new remote resource exists, + * it will be removed and replaced with the new resource + * @return RemoteResource object + */ +public RemoteResourceSource linkFile(); + + +/** + * Copy a file from a remote resource to another new remote resource. If the new remote resource exists, + * it will be removed and replaced with the new resource + * @return RemoteResource object + */ +public RemoteResourceSource copyFile(); + + +/** + * Copy a file from a remote resource to another new remote resource. If the new remote resource exists, + * it will be removed and replaced with the new resource + * @return RemoteResource object + */ +public RemoteResourceSource copyFile(boolean replace); +public RemoteResourceSource copyFile(String backendType); +public RemoteResourceSource copyFile(String backendType, boolean replaceOption); + +/** + * Move a file from a remote resource to another new remote resource.
+ * it will be removed and replaced with the new resource
+ * @return RemoteResource object
+ */
+public RemoteResourceSource moveFile();
+
+
+public LocalResource get(String backendType);
+
+
+public RemoteResourceInfo getSize(String backendType);
+
+public RemoteResourceFolderInfo getFolderTotalVolume();
+
+public RemoteResourceFolderInfo getFolderTotalItems();
+
+public String getTotalUserVolume();
+
+public String getUserTotalItems();
+
+public RemoteResourceFolderInfo getFolderLastUpdate();
+
+public RemoteResource remove(String backendType);
+
+
+public RemoteResource showDir(String backendType);
+
+
+public RemoteResource removeDir(String backendType);
+
+
+public RemoteResource getUrl(String backendType);
+
+
+public RemoteResourceInfo getTTL(String backendType);
+
+
+public AmbiguousResource unlock(String key, String backendType);
+
+
+public RemoteResourceInfo renewTTL(String key, String backendType);
+
+
+public RemoteResourceSource linkFile(String backendType);
+
+
+//public RemoteResourceSource copyFile(String backendType);
+
+
+public RemoteResource duplicateFile();
+
+
+public RemoteResource duplicateFile(String backendType);
+
+public RemoteResourceSource softCopy();
+public RemoteResourceSource softCopy(boolean replace);
+public RemoteResourceSource softCopy(String backendType);
+public RemoteResourceSource softCopy(String backendType, boolean replaceOption);
+
+
+
+public RemoteResourceSource moveFile(String backendType);
+
+
+public RemoteResourceSource moveDir(String backendType);
+
+
+public RemoteResourceSource moveDir();
+
+
+public RemoteResourceSource copyDir(String backendType);
+
+
+public RemoteResourceSource copyDir();
+
+
+public RemoteResourceComplexInfo getMetaFile();
+
+/**
+ * Close the connections to the backend storage system
+ */
+public void close();
+
+
+public RemoteResource getUrl(boolean forceCreation);
+
+public RemoteResource getUrl(String backendType, boolean forceCreation);
+
+public RemoteResource getMetaInfo(String field);
+
+public RemoteResource getMetaInfo(String field, String backendType);
+
+public RemoteResource setMetaInfo(String field, String value);
+
+public RemoteResource setMetaInfo(String field, String value, String backendType);
+
+public String getId(String id);
+
+public RemoteResource getRemotePath();
+
+public RemoteResource getHttpUrl(boolean forceCreation);
+
+public RemoteResource getHttpUrl(String backendType, boolean forceCreation);
+
+public RemoteResource getHttpUrl(String backendType);
+
+public RemoteResource getHttpUrl();
+
+public RemoteResource getHttpsUrl(boolean forceCreation);
+
+public RemoteResource getHttpsUrl(String backendType, boolean forceCreation);
+
+public RemoteResource getHttpsUrl(String backendType);
+
+public RemoteResource getHttpsUrl();
+
+
+public void setWriteConcern(String write);
+
+
+public void setReadConcern(String read);
+
+
+public void setOwner(String owner);
+
+public void setDbNames(String [] dbs);
+
+/**
+ *
+ * Method for checking if the file exists
+ */
+public abstract RemoteResourceBoolean exist();
+
+public abstract RemoteResourceBoolean exist(String backendType);
+
+}
\ No newline at end of file
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/BucketCoding.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/BucketCoding.java
new file mode 100644
index 0000000..c98dde8
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/BucketCoding.java
@@ -0,0 +1,188 @@
+package org.gcube.contentmanagement.blobstorage.service.directoryOperation;
+
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class contains all the encoding and decoding logic for a bucket name:
+ * bucketId: TO DO
+ * bucketName (if it is a dir):
+ * bucketName (if it is a file):
+ *
+ * @author Roberto Cirillo (ISTI - CNR)
+ */
+public class BucketCoding {
+
+    /**
+     * Logger for this class
+     */
+    final Logger logger=LoggerFactory.getLogger(BucketCoding.class);
+
+    /**
+     * Encodes the name of a file object in a file-type bucket
+     * @param path the path on the cluster
+     * @param rootArea the remote root path
+     * @return the coded bucket name
+     */
+    public String bucketFileCoding(String path, String rootArea) {
+        logger.debug("Coding name: path: "+path+" rootArea "+rootArea);
+//        if(!ObjectId.isValid(path)){
+        String absolutePath =path;
+        if(rootArea.length()>0){
+            absolutePath = mergingPathAndFile(rootArea, path);
+            path=absolutePath;
+        }
+        if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
+            absolutePath=absolutePath.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
+        }
+        logger.debug("coding name done");
+//        }
+        return path;
+    }
+
+    /**
+     * rootArea + path form an absolute path
+     *
+     * @param path remote relative path
+     * @param rootArea remote root path
+     * @return absolute remote path
+     */
+    public String mergingPathAndDir(String rootArea, String path ) {
+        char c=rootArea.charAt(rootArea.length()-1);
+        if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            rootArea=rootArea.substring(0, rootArea.length()-1);
+        }
+        c=path.charAt(0);
+        if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=Costants.FILE_SEPARATOR+path;
+        }
+        c=path.charAt(path.length()-1);
+        if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=path+Costants.FILE_SEPARATOR;
+        }
+        String bucketName=rootArea+path;
+        return bucketName;
+    }
+
+    /**
+     * Check and correct the directory format
+     * @param path remote dir path
+     * @return remote dir path
+     */
+    public String checkSintaxDir(String path ) {
+        char c=path.charAt(0);
+        if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=Costants.FILE_SEPARATOR+path;
+        }
+        c=path.charAt(path.length()-1);
+        if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=path+Costants.FILE_SEPARATOR;
+        }
+        String bucketName=path;
+        return bucketName;
+    }
+
+    /**
+     * rootArea + path form an absolute path
+     *
+     * @param path relative path
+     * @param rootArea root path
+     * @return complete path
+     */
+    private String mergingPathAndFile(String rootArea, String path ) {
+        char c=rootArea.charAt(rootArea.length()-1);
+        if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            rootArea=rootArea.substring(0, rootArea.length()-1);
+        }
+        if(path == null) return null;
+        c=path.charAt(0);
+        if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=Costants.FILE_SEPARATOR+path;
+        }
+        c=path.charAt(path.length()-1);
+        if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
+            path=path.substring(0, path.length()-1);
+        }
+        String bucketName=rootArea+path;
+        return bucketName;
+    }
+
+    /**
+     *
+     * Decodes the name of a file object in a file-type bucket
+     * @param key relative remote path
+     * @param rootArea the remote root path
+     * @return the decoded file name
+     */
+    public String bucketFileDecoding(String key, String rootArea) {
+        String nameDecoded=key;
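+        // non-MongoDB backends encode the whole remote path in the key; the branch below keeps only the last segment
+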
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + String[] splits=key.split(Costants.SEPARATOR); + nameDecoded=splits[splits.length-1]; + } + if (logger.isDebugEnabled()) { + logger.debug("decodeBucketFile(String) - end"); + } + return nameDecoded; + } + + /** + * Coding the name of a directory object in a directory-type bucket + * @param author file owner + * @param dir remote directory + * @return the complete remote path + */ + public String bucketDirCoding(String dir, String rootArea) { + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + dir=dir.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + } + dir=mergingPathAndDir(rootArea, dir); + return dir; + } + + /** + * Decoding the name in a directory-type bucket. + * In a directory type bucket you can found or a file object or a directory object + * @param key remote path + * @return the remote path + */ + public String bucketDirDecoding(String key, String rootArea) { + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + String lastChar=key.substring(key.length()-3); + // if is a dir object + if(lastChar.equalsIgnoreCase(Costants.SEPARATOR)){ + String[] extractPath=key.split(Costants.SEPARATOR); + String[] rootPath= rootArea.split(Costants.FILE_SEPARATOR); + key=""; + for(int i=rootPath.length;i map=null; + try { + map = tm.getValues(resource, bucketList[i], DirectoryEntity.class); + } catch (MongoException e) { + tm.close(); + e.printStackTrace(); + } + Set keys=map.keySet(); + for(Iterator it=keys.iterator(); it.hasNext();){ + String key=(String)it.next(); + if(key.equalsIgnoreCase(bucketDirCoded)){ + if(logger.isDebugEnabled()) + logger.debug("key matched: "+key+" remove"); + //recursively remove + try { + map=tm.getValues(resource, key, DirectoryEntity.class); + } catch (MongoException e) { + tm.close(); + e.printStackTrace(); + } + keys=map.keySet(); + for(Iterator it2=keys.iterator(); it2.hasNext();){ + String key2=(String)it2.next(); + if(logger.isDebugEnabled()) + logger.debug("the bucket: "+key+" have a son: "+key2); + if(bc.isFileObject(key2)){ + if(logger.isDebugEnabled()){ + logger.debug("remove "+key2+" in the bucket: "+key); + } + if(logger.isDebugEnabled()) + logger.debug("remove all keys in the bucket: "+key2); + try { + tm.removeRemoteFile(key2, resource); + } catch (UnknownHostException e) { + tm.close(); + e.printStackTrace(); + } catch (MongoException e) { + tm.close(); + e.printStackTrace(); + } + }else{ + if(logger.isDebugEnabled()) + logger.debug(key2+" is a directory"); + String bucketDecoded=bc.bucketDirDecoding(key2, rootArea); + removeDirBucket(resource, bucketDecoded, rootArea, backendType, dbNames); + } + + } + if(logger.isDebugEnabled()) + logger.debug("remove "+key+" in the bucket: "+bucketList[i]); + if(logger.isDebugEnabled()) + logger.debug("remove all keys in the bucket: "+key); + try { + tm.removeRemoteFile(key, resource); + } catch (UnknownHostException e) { + tm.close(); + e.printStackTrace(); + } catch (MongoException e) { + tm.close(); + e.printStackTrace(); + } + } + } + } + } + return bucketDirCoded; + } + + + /** + * recursively search on directories buckets, return a key if found else return null + * @param name fileName + * @param bucketCoded bucketName coded + * @param tm a client for the cluster + */ + public String searchInBucket(MyFile resource, String name, String bucketCoded, + TransportManager tm, String rootArea) { + Map dirs=null; + try{ + dirs=tm.getValues(resource, bucketCoded, DirectoryEntity.class); + }catch(Exception e){ + tm.close(); + logger.info("object 
not found"); + return null; + } + + Set set=dirs.keySet(); + for(Iterator it= set.iterator(); it.hasNext();){ + String key=(String)it.next(); + if(logger.isDebugEnabled()) + logger.debug("try in "+key); + String nameDecoded = new BucketCoding().bucketFileDecoding(key, rootArea); + if(logger.isDebugEnabled()) + logger.debug("name decoded: "+nameDecoded+" name searched is: "+name); + if((nameDecoded!=null ) && (nameDecoded.equalsIgnoreCase(name))){ + if(logger.isDebugEnabled()) + logger.debug("FOUND in "+bucketCoded+" objectId returned: "+key); + return key; + }else{ + searchInBucket(resource,name, key, tm, rootArea); + } + } + return null; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/DirectoryEntity.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/DirectoryEntity.java new file mode 100644 index 0000000..bab7704 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/DirectoryEntity.java @@ -0,0 +1,80 @@ +package org.gcube.contentmanagement.blobstorage.service.directoryOperation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; + + +/** + * Is used only by terrastore client: Define a directory object. + * Is useful for the operations on the directory tree + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class DirectoryEntity { + + private String directory; + private String name; + private String author; + private String a; + private String b; + private String c; + private String d; + private String e; + + + public String getDirectory() { + return directory; + } + + public void setDirectory(String directory) { + this.directory = directory; + } + + public DirectoryEntity(){ + + } + + public DirectoryEntity(String dir, String author){ + setDirectory(dir); + setAuthor(author); + } + + public DirectoryEntity(String dir, String author, MyFile file){ + setDirectory(dir); + setAuthor(author); + } + + public String getAuthor() { + return author; + } + + public void setAuthor(String author) { + this.author = author; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int setGenericVariable(String name, String value){ + int codeError=0; + if(a==null){ + a=name+"%"+value; + }else if(b==null){ + b=name+"%"+value; + }else if(c==null){ + c=name+"%"+value; + }else if(d==null){ + d=name+"%"+value; + }else if(e==null){ + e=name+"%"+value; + }else + codeError=-1; + return codeError; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/Encrypter.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/Encrypter.java new file mode 100644 index 0000000..3e46edc --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/directoryOperation/Encrypter.java @@ -0,0 +1,129 @@ +package org.gcube.contentmanagement.blobstorage.service.directoryOperation; + +import java.io.UnsupportedEncodingException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.spec.KeySpec; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.DESKeySpec; +import javax.crypto.spec.DESedeKeySpec; +import org.gcube.common.encryption.StringEncrypter; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; + + +/** + * This class can be used to encrypt and decrypt using DES and a 
given key + *@author Roberto Cirillo (ISTI-CNR) + * + */ +public class Encrypter { + + private KeySpec keySpec; + private SecretKeyFactory keyFactory; + private static final String UNICODE_FORMAT = "UTF8"; + + @Deprecated + public Encrypter( String encryptionScheme ) throws EncryptionException + { + this( encryptionScheme, null ); + } + + public Encrypter( String encryptionScheme, String encryptionKey ) + throws EncryptionException + { + + if ( encryptionKey == null ) + throw new IllegalArgumentException( "encryption key was null" ); + if ( encryptionKey.trim().length() < 24 ) + throw new IllegalArgumentException( + "encryption key was less than 24 characters" ); + + try + { + byte[] keyAsBytes = encryptionKey.getBytes( UNICODE_FORMAT ); + + if ( encryptionScheme.equals( Costants.DESEDE_ENCRYPTION_SCHEME) ) + { + keySpec = new DESedeKeySpec( keyAsBytes ); + } + else if ( encryptionScheme.equals( Costants.DES_ENCRYPTION_SCHEME ) ) + { + keySpec = new DESKeySpec( keyAsBytes ); + } + else + { + throw new IllegalArgumentException( "Encryption scheme not supported: " + + encryptionScheme ); + } + + keyFactory = SecretKeyFactory.getInstance( encryptionScheme); + } + catch (InvalidKeyException e) + { + throw new EncryptionException( e ); + } + catch (UnsupportedEncodingException e) + { + throw new EncryptionException( e ); + } + catch (NoSuchAlgorithmException e) + { + throw new EncryptionException( e ); + } + } + + /** + * Encrypt a string + * @param unencryptedString string to encrypt + * @return encrypted string + * @throws EncryptionException + */ + public String encrypt( String unencryptedString ) throws EncryptionException + { + if ( unencryptedString == null || unencryptedString.trim().length() == 0 ) + throw new IllegalArgumentException( + "unencrypted string was null or empty" ); + try + { + SecretKey key = keyFactory.generateSecret( keySpec ); + return StringEncrypter.getEncrypter().encrypt(unencryptedString, key);//t(unencryptedString, key); + } + catch (Exception e) + { + throw new EncryptionException( e ); + } + } + + /** + * decrypt a string + * @param encryptedString encrypted string + * @return decrypted string + * @throws EncryptionException + */ + public String decrypt( String encryptedString ) throws EncryptionException + { + if ( encryptedString == null || encryptedString.trim().length() <= 0 ) + throw new IllegalArgumentException( "encrypted string was null or empty" ); + + try + { + SecretKey key = keyFactory.generateSecret( keySpec ); + return StringEncrypter.getEncrypter().decrypt(encryptedString, key); + } + catch (Exception e) + { + throw new EncryptionException( e ); + } + } + + + @SuppressWarnings("serial") + public static class EncryptionException extends Exception + { + public EncryptionException( Throwable t ) + { + super( t ); + } + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/AmbiguousResource.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/AmbiguousResource.java new file mode 100644 index 0000000..9f21fd4 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/AmbiguousResource.java @@ -0,0 +1,33 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE; +/** + * This class is used from methods that can have both a RemoteResource or a LocalResource + * + * @author Roberto Cirillo (ISTI-CNR) + * + */ 
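+// Editorial usage sketch (illustrative only, not from the original sources): lock() returns an
+// AmbiguousResource, so the caller can continue with either a local or a remote selection, e.g.
+//   String lockKey = client.lock().RFile("myDir/file.txt");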
+public class AmbiguousResource extends RemoteResource { + + public AmbiguousResource(MyFile file, ServiceEngine engine) { + super(file, engine); + } + + /** + * define local resource + * @param path : local absolute path of resource + * @return remoteResource object + */ + public RemoteResource LFile(String path){ + if(getMyFile() != null){ + getMyFile().setLocalPath(path); + }else{ + setMyFile(setGenericProperties("", "", path, "local")); + getMyFile().setLocalPath(path); + } + getMyFile().setLocalResource(LOCAL_RESOURCE.PATH); + return new RemoteResource(getMyFile(), getEngine()); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/LocalResource.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/LocalResource.java new file mode 100644 index 0000000..c37e740 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/LocalResource.java @@ -0,0 +1,139 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import java.io.InputStream; +import java.io.OutputStream; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; + + +/** + * Defines the operations for selecting a local resource. + * ex. a local path for a download operation, or a inputStream + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class LocalResource extends Resource{ + + + public LocalResource(MyFile file, ServiceEngine engine) { + super(file, engine); + } + + /** + * define local resource + * @param path : local absolute path of resource + * @return remoteResource object + */ + public RemoteResource LFile(String path){ + if(getMyFile() != null){ + getMyFile().setLocalPath(path); + }else{ + setMyFile(setGenericProperties("", "", path, "local")); + getMyFile().setLocalPath(path); + } + getMyFile().setLocalResource(LOCAL_RESOURCE.PATH); + return new RemoteResource(getMyFile(), getEngine()); + } + + /** + * define local resource + * @param is : inputStream of resource + * @return remoteResource object + */ + public RemoteResource LFile(InputStream is) { + if(getMyFile() != null){ + getMyFile().setInputStream(is); + }else{ + setMyFile(new MyFile(engine.getGcubeMemoryType())); + getMyFile().setInputStream(is); + } + getMyFile().setLocalResource(LOCAL_RESOURCE.INPUT_STREAM); + return new RemoteResource(getMyFile(), getEngine()); + } + + /** + * define local resource + * @param os output stream of resource + * @return remoteResource object + */ + public RemoteResource LFile(OutputStream os) { + if(getMyFile() != null){ + getMyFile().setOutputStream(os); + }else{ + setMyFile(new MyFile(engine.getGcubeMemoryType())); + getMyFile().setOutputStream(os); + } + getMyFile().setLocalResource(LOCAL_RESOURCE.OUTPUT_STREAM); + return new RemoteResource(getMyFile(), getEngine()); + } + + /** + * Method that returns an inputStream of a remote resource + * @param path remote path of remote resource + * @return inputStream of remote resource identifies by path argument + * + */ + public InputStream RFileAsInputStream(String path){ + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + file.setType("input"); + file.setLocalResource(LOCAL_RESOURCE.VOID); + file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_INPUT_STREAM); + setMyFile(file); + 
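+        // hand the configured resource to the engine; the remote operation below produces the stream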
engine.service.setResource(getMyFile()); + getRemoteObject(file, engine.primaryBackend, engine.volatileBackend); + InputStream is= file.getInputStream(); + file.setInputStream(null); + return is; + } + + + + + /** + * Method that returns an inputStream of a remote resource + * @param path remote path of remote resource + * @return inputStream of remote resource identifies by path argument + * + */ + @Deprecated + public InputStream RFileAStream(String path){ + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + file.setType("input"); + file.setLocalResource(LOCAL_RESOURCE.VOID); + file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_INPUT_STREAM); + setMyFile(file); + engine.service.setResource(getMyFile()); + getRemoteObject(file, engine.primaryBackend, engine.volatileBackend); + InputStream is= file.getInputStream(); + file.setInputStream(null); + return is; + } + + /** + * Method that returns an outputStream of a remote resource, used for upload operation + * @param path remote path of remote resource + * @return outputStream of remote resource identifies by path argument + * + */ + public OutputStream RFileAsOutputStream(String path){ + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + file.setType("output"); + file.setLocalResource(LOCAL_RESOURCE.VOID); + file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_OUTPUTSTREAM); + setMyFile(file); + engine.service.setResource(getMyFile()); +// retrieveRemoteObject(engine.primaryBackend); + getRemoteObject(file, engine.primaryBackend, engine.volatileBackend); + OutputStream os=file.getOutputStream(); + file.setOutputStream(null); + return os; + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResource.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResource.java new file mode 100644 index 0000000..c66a1c0 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResource.java @@ -0,0 +1,156 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import java.net.UnknownHostException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryEntity; +import org.gcube.contentmanagement.blobstorage.service.operation.OperationManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.resource.StorageObject; + + +/** + * Defines the operations for selecting a remote resource. + * ex. a remote path for a download operation. 
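+ * <p>Editorial sketch (illustrative only; the folder name is a placeholder):
+ * <pre>{@code
+ * List<StorageObject> entries = client.showDir().RDir("myDir/");
+ * }</pre>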
+ * This selection is made for all types of operation + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class RemoteResource extends Resource{ + + + + public RemoteResource(MyFile file, ServiceEngine engine) { + super(file, engine); + logger.info("file gCube parameter costructor: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + } + +/** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + public String RFile(String path) throws RemoteBackendException{ + return RFile(path, false); + } + + + /** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + public String RFile(String path, boolean backendTypeReturned) throws RemoteBackendException{ + logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + setMyFile(file); + engine.service.setResource(getMyFile()); + Object obj=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend); + String id=null; + if(obj!=null) + id=obj.toString(); + if (backendTypeReturned&& (id!=null)) + return id+BACKEND_STRING_SEPARATOR+engine.getBackendType(); + return id; + } + + +/** + * identify a remote resource by object id + * @param id that identifies a remote resource + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + + public String RFileById(String id) throws RemoteBackendException{ + return RFileById(id, false); + } + + + /** + * identify a remote resource by object id + * @param id that identifies a remote resource + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + + public String RFileById(String id, boolean backendTypeReturned) throws RemoteBackendException{ + getMyFile().setRemoteResource(REMOTE_RESOURCE.ID); + Object obj = executeOperation(id); + String idReturned=null; + if(obj!=null) + idReturned=obj.toString(); + if (backendTypeReturned && idReturned != null) + return idReturned+BACKEND_STRING_SEPARATOR+engine.getBackendType(); + return idReturned; + } + + + +/** + * Identify a remote folder by path + * @param dir dir remote path + * @return list of object contained in the remote dir + */ + public List RDir(String dir){ + getMyFile().setRemoteResource(REMOTE_RESOURCE.DIR); + getMyFile().setOwner(engine.owner); + if(engine.getCurrentOperation().equalsIgnoreCase("showdir")){ + dir = new BucketCoding().bucketDirCoding(dir, engine.getContext()); + TransportManagerFactory tmf= new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword()); + TransportManager tm=tmf.getTransport(engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern()); + Map mapDirs=null; + try { + mapDirs = tm.getValues(getMyFile(), dir, DirectoryEntity.class); + } catch (RemoteBackendException e) { + e.printStackTrace(); + } + List dirs=null; + if(mapDirs!=null){ + dirs = engine.addObjectsDirBucket(mapDirs); + } + if(dirs==null) 
+ dirs=Collections.emptyList(); + return dirs; + }else if(engine.getCurrentOperation().equalsIgnoreCase("removedir")){ + if((dir != null) && (engine.owner != null)){ + DirectoryBucket dirBuc=new DirectoryBucket(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword(), dir, engine.owner); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")) + dirBuc.removeDirBucket(getMyFile(), dir, engine.getContext(), engine.getBackendType(), engine.getDbNames()); + else{ + TransportManagerFactory tmf=new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword()); + TransportManager tm=tmf.getTransport(Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern()); + dir=new BucketCoding().bucketFileCoding(dir, engine.getContext()); + try { + tm.removeDir(dir, getMyFile()); + } catch (UnknownHostException e) { + throw new RemoteBackendException(e.getMessage()); + } + + } + }else{ + logger.error("REMOVE Operation not valid:\n\t specify a valid bucketID or an author and a path on the cluster "); + } + return null; + + }else{ + throw new IllegalArgumentException("The method RDir is not applicable for the operation selected"); + } + } + +} + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceBoolean.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceBoolean.java new file mode 100644 index 0000000..1daf672 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceBoolean.java @@ -0,0 +1,77 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class RemoteResourceBoolean extends Resource{ + + /** + * @param file + * @param engine + */ + public RemoteResourceBoolean(MyFile file, ServiceEngine engine) { + super(file, engine); + logger.info("file gCube parameter costructor: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + } + + + /** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + public boolean RFile(String path) throws RemoteBackendException{ + return RFile(path, false); + } + + + /** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + public boolean RFile(String path, boolean backendTypeReturned) throws RemoteBackendException{ + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + Object obj = executeOperation(path); + Boolean value= new Boolean(obj.toString()); + return value; + } + + + + + /** + * identify a remote resource by object id + * @param id that identifies a remote resource + * @return remote resource id + * @throws RemoteBackendException if there are runtime exception from the remote backend + * @deprecated this method could be replace with RFile method + */ + public boolean RFileById(String id) throws RemoteBackendException{ + 
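+        // same flow as RFile(String): run the remote operation and coerce its result to a boolean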
getMyFile().setRemoteResource(REMOTE_RESOURCE.ID); + Object obj = executeOperation(id); + Boolean value= new Boolean(obj.toString()); + return value; +// getMyFile().setOwner(engine.owner); +// engine.service.setResource(getMyFile()); +// String idReturned=null; +// getMyFile().setRemotePath(id); +// Object obj=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend); +// if(obj!=null) +// idReturned=obj.toString(); +// return idReturned; + } + + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceComplexInfo.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceComplexInfo.java new file mode 100644 index 0000000..3bc370b --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceComplexInfo.java @@ -0,0 +1,35 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; + +/** + * Manage operations that return a structured object + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class RemoteResourceComplexInfo extends Resource{ + + + public RemoteResourceComplexInfo(MyFile file, ServiceEngine engine) { + super(file, engine); + } + + /** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return a long object to remote resource ex: the size of the resource + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + + public MyFile RFile(String path) throws RemoteBackendException{ + setMyFile(setGenericProperties(engine.getContext(), engine.owner, path, "remote")); + getMyFile().setRemotePath(path); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + engine.service.setResource(getMyFile()); + getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend); + return getMyFile(); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceDestination.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceDestination.java new file mode 100644 index 0000000..3ae2470 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceDestination.java @@ -0,0 +1,45 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; + +/** + * Manage operation results of String type + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class RemoteResourceDestination extends Resource{ + + public RemoteResourceDestination(MyFile file, ServiceEngine engine) { + super(file, engine); + } + + /** + * + * @param remoteDestination it can be a remote path or an id + * @return + * @throws RemoteBackendException + */ + public String to(String remoteDestination) throws RemoteBackendException{ + logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + file = setGenericProperties(engine.getContext(), engine.owner, remoteDestination, "remote"); + file.setRemotePath(remoteDestination); + 
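+        // the destination may be a plain remote path or a valid ObjectId; the check below distinguishes the two cases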
file.setOwner(engine.owner); + setMyFile(file); + if((remoteDestination != null) &&(ObjectId.isValid(remoteDestination))){ + getMyFile().setRemoteResource(REMOTE_RESOURCE.ID); + getMyFile().setId2(remoteDestination); + }else{ + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + } +// setMyFile(file); + engine.service.setResource(getMyFile()); + String bucketName=null; + logger.info("file gCube parameter after: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + bucketName=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend).toString(); + return bucketName; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceFolderInfo.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceFolderInfo.java new file mode 100644 index 0000000..d151d2e --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceFolderInfo.java @@ -0,0 +1,84 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; + + +/** + * Manage folder operation result of String type + * @author Roberto Cirillo (ISTI-CNR) + * + */ +public class RemoteResourceFolderInfo extends Resource { + + private String serviceName; + private String ownerGcube; + private String gcubeScope; + private String gcubeAccessType; + private String gcubeMemoryType; + + public RemoteResourceFolderInfo(MyFile file, ServiceEngine engine) { + super(file, engine); + } + + /** + * identify a remote resource by path (a file or a directory) + * @param path the remote path + * @return a long object to remote resource ex: the size of the resource + * @throws RemoteBackendException if there are runtime exception from the remote backend + */ + + public String RDir(String path) throws RemoteBackendException{ + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + setMyFile(file); + engine.service.setResource(getMyFile()); + return getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend).toString(); + } + + + public String getServiceName() { + return serviceName; + } + + public void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + + public String getOwnerGcube() { + return ownerGcube; + } + + public void setOwnerGcube(String ownerGcube) { + this.ownerGcube = ownerGcube; + } + + public String getGcubeScope() { + return gcubeScope; + } + + public void setGcubeScope(String gcubeScope) { + this.gcubeScope = gcubeScope; + } + + public String getGcubeAccessType() { + return gcubeAccessType; + } + + public void setGcubeAccessType(String gcubeAccessType) { + this.gcubeAccessType = gcubeAccessType; + } + + + public String getGcubeMemoryType() { + return gcubeMemoryType; + } + + public void setGcubeMemoryType(String gcubeMemoryType) { + this.gcubeMemoryType = gcubeMemoryType; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceInfo.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceInfo.java new file mode 100644 index 0000000..6297d2b --- /dev/null +++ 
b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceInfo.java
@@ -0,0 +1,53 @@
+package org.gcube.contentmanagement.blobstorage.service.impl;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+
+
+/**
+ * Unlike the RemoteResource class, this class returns information to the client, such as a TTL or a size.
+ * It is used for the operations on TTL
+ * @author rcirillo
+ *
+ */
+public class RemoteResourceInfo extends Resource{
+
+
+    public RemoteResourceInfo(MyFile file, ServiceEngine engine) {
+        super(file, engine);
+    }
+
+    /**
+     * Identify a remote resource by path (a file or a directory)
+     * @param path the remote path
+     * @return a long value for the remote resource, e.g. its size
+     * @throws RemoteBackendException if there are runtime exceptions from the remote backend
+     */
+
+    public long RFile(String path) throws RemoteBackendException{
+        getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
+        String info= executeOperation(path).toString();
+        if(info!=null)
+            return Long.parseLong(info);
+        else
+            return -1;
+    }
+
+    /**
+     * Identify a remote resource by object id
+     * @param id identifies a remote file
+     * @return a long value for the remote resource, e.g. its size
+     * @throws RemoteBackendException if there are runtime exceptions from the remote backend
+     */
+    public long RFileById(String id) throws RemoteBackendException{
+        getMyFile().setRemoteResource(REMOTE_RESOURCE.ID);
+        String info=executeOperation(id).toString();
+        if(info!=null)
+            return Long.parseLong(info);
+        else
+            return -1;
+    }
+
+}
+
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceSource.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceSource.java
new file mode 100644
index 0000000..345f419
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/RemoteResourceSource.java
@@ -0,0 +1,39 @@
+package org.gcube.contentmanagement.blobstorage.service.impl;
+
+import org.bson.types.ObjectId;
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
+
+/**
+ *
+ * @author Roberto Cirillo (ISTI-CNR)
+ *
+ */
+public class RemoteResourceSource extends Resource {
+
+    public RemoteResourceSource(MyFile file, ServiceEngine engine) {
+        super(file, engine);
+    }
+
+    /**
+     * Select the source of the operation
+     * @param remoteIdentifier it can be an id or a remote path
+     * @return a RemoteResourceDestination used to select the destination
+     */
+    public RemoteResourceDestination from(String remoteIdentifier){
+        if(getMyFile() != null){
+            getMyFile().setLocalPath(remoteIdentifier);
+        }else{
+            setMyFile(setGenericProperties("", "", remoteIdentifier, "local"));
+            getMyFile().setLocalPath(remoteIdentifier);
+        }
+        if(ObjectId.isValid(remoteIdentifier)){
+            getMyFile().setLocalResource(LOCAL_RESOURCE.ID);
+            getMyFile().setId(remoteIdentifier);
+        }else{
+            getMyFile().setLocalResource(LOCAL_RESOURCE.PATH);
+        }
+        return new RemoteResourceDestination(file, engine);
+    }
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/Resource.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/Resource.java
new file mode 100644
index 0000000..bb1f893
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/Resource.java
@@ -0,0 +1,138 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * defines a common set of operations to identify a remote resource or a local resource + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class Resource { + + final Logger logger = LoggerFactory.getLogger(ServiceEngine.class); + protected static final String BACKEND_STRING_SEPARATOR="%"; + protected MyFile file; + protected ServiceEngine engine; + + + public Resource(MyFile file, ServiceEngine engine){ + setMyFile(file); + setEngine(engine); + } + + protected ServiceEngine getEngine() { + return engine; + } + + protected void setEngine(ServiceEngine engine) { + this.engine = engine; + } + + protected MyFile getMyFile(){ + return file; + } + + protected void setMyFile(MyFile f){ + if (f!=null) + file=f; + else + logger.warn("instantiated an empty file object"); + } + + + /** + * Set generic properties on MyFile object + * @param context remote root path + * @param owner file author + * @param path remote/local relative path + * @param type remote or local + * @return the current resource + */ + protected MyFile setGenericProperties(String context, String owner, String path, String type) { + if((path != null) && (path.length()>0)){ + if(ObjectId.isValid(path)){ + if(file==null) + file= new MyFile(path, engine.getGcubeMemoryType()); + String id = file.getId(); + if((id != null) && (!id.isEmpty())) + file.setId2(path); + else + file.setId(path); + file.setRootPath(context); + file.setAbsoluteRemotePath(context); + } else{ + String[] dirs= path.split(Costants.FILE_SEPARATOR); + String name=dirs[dirs.length-1]; + if (logger.isDebugEnabled()) { + logger.debug("path(String) - name: " + name); + } + if(file == null){ + file= new MyFile(name, engine.getGcubeMemoryType()); + }else{ + file.setName(name); + } + if(type.equalsIgnoreCase("remote") && (context!=null) && context.length()>0){ + file.setRootPath(context); + path=new BucketCoding().bucketFileCoding(path, context); + file.setAbsoluteRemotePath(path); + } + String dir=path.substring(0, (path.length()-name.length())); + if (logger.isDebugEnabled()) { + logger.debug("path(String) - path: " + dir); + } + if(type.equalsIgnoreCase("local")){ + if(file.getLocalDir()== null) + file.setLocalDir(dir); + }else{ + if(file.getRemoteDir()== null) + file.setRemoteDir(dir); + } + + } + file.setOwner(owner); + }else{ + file.setOwner(owner); + file.setRootPath(context); + file.setAbsoluteRemotePath(context); + } + + return file; + } + + protected Object getRemoteObject(MyFile file, String[] backend, String[] vltBackend)throws RemoteBackendException { + Object obj=null; + obj=retrieveRemoteObject(file, backend); + if((obj == null) && (vltBackend !=null && vltBackend.length>0)) + obj=retrieveRemoteObject(file, vltBackend); + return obj; + } + + protected Object retrieveRemoteObject(MyFile file, String[] backend) throws RemoteBackendException { + Object obj=null; + if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null))) + 
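+            // a stream or a path has been provided: delegate the operation to the engine's OperationManager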
obj=engine.service.startOperation(file,file.getRemotePath(), file.getOwner(), backend, Costants.DEFAULT_CHUNK_OPTION, file.getRootPath(), file.isReplace()); + else{ + logger.error("parameters incompatible "); + } + return obj; + } + + protected Object executeOperation(String path) { + logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope()); + file = setGenericProperties(engine.getContext(), engine.owner, path, "remote"); + file.setRemotePath(path); + file.setOwner(engine.owner); + setMyFile(file); + engine.service.setResource(getMyFile()); + Object obj=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend); + return obj; + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/ServiceEngine.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/ServiceEngine.java new file mode 100644 index 0000000..6932871 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/impl/ServiceEngine.java @@ -0,0 +1,1165 @@ +package org.gcube.contentmanagement.blobstorage.service.impl; + +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.gcube.contentmanagement.blobstorage.resource.AccessType; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.resource.StorageObject; +import org.gcube.contentmanagement.blobstorage.service.IClient; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException; +import org.gcube.contentmanagement.blobstorage.service.operation.*; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.commons.codec.binary.Base64; +import org.bson.types.ObjectId; + + + +/** + * This is the client's engine, implements the IClient interface + * and starts the build's operations for the transport manager + * + * @author Roberto Cirillo (ISTI - CNR) + */ + +public class ServiceEngine implements IClient { + + /** + * Logger for this class + */ + + final Logger logger = LoggerFactory.getLogger(ServiceEngine.class); + public String[] primaryBackend; + public String[] volatileBackend; + protected OperationManager service; + protected MyFile file; + protected String bucket; + protected String bucketID; + protected String author; + protected String owner; +// root Directory server side + protected String environment; + protected String currentOperation; + protected boolean replaceOption; + protected String publicArea; + protected String homeArea; + protected Map attributes; +//identifies the scope : public, private or group + private AccessType accessType; +// parameters for GCube instance + private String serviceName; + private String ownerGcube; + private String gcubeScope; + private 
AccessType gcubeAccessType;
+    private MemoryType gcubeMemoryType;
+    private String serviceClass;
+// identifies the backend server type, e.g. MongoDB, UStore
+    private String backendType;
+// backend server username
+    private String user;
+//backend server password
+    private String password;
+    private String passPhrase;
+    private String resolverHost;
+    private String[] dbNames;
+//    private static final String DEFAULT_RESOLVER_HOST= "data.d4science.org";
+    private String write;
+    private String read;
+
+    public ServiceEngine(String[] server){
+        this.primaryBackend=server;
+    }
+
+    /**
+     * Constructor for version 2.0.0. Objects stored from version 2.0.0 on are incompatible with those stored by previous versions.
+     * @param server list of servers
+     * @param environment root directory in the remote storage
+     * @param accessType type of sharing: private, shared or public
+     * @param owner the owner of the file
+     *
+     */
+    public ServiceEngine(String[] server, String environment, String accessType, String owner){
+        this.primaryBackend=server;
+        setOwner(owner);
+        this.setEnvironment(environment);
+        setAccessType(accessType);
+        if(accessType.equalsIgnoreCase("public"))
+            this.setPublicArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR+"public"+Costants.FILE_SEPARATOR);
+        else
+            this.setPublicArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR);
+        this.setHomeArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR);//+"home"+FILE_SEPARATOR+owner+FILE_SEPARATOR);
+    }
+
+
+
+    public ServiceEngine(String[] server, String [] volatileBackend, String environment, String accessType, String owner, String memory){
+        if(memory.equals(MemoryType.VOLATILE.toString())&& (volatileBackend[0]!=null)){
+            this.primaryBackend=volatileBackend;
+        }else if(memory.equals(MemoryType.VOLATILE.toString())&& (volatileBackend[0]==null)){
+            this.primaryBackend=server;
+            this.volatileBackend=null;
+        }else{
+            this.primaryBackend=server;
+            this.volatileBackend=volatileBackend;
+        }
+        setOwner(owner);
+        this.setEnvironment(environment);
+        setAccessType(accessType);
+        if(accessType.equalsIgnoreCase("public"))
+            this.setPublicArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR+"public"+Costants.FILE_SEPARATOR);
+        else
+            this.setPublicArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR);
+        this.setHomeArea(Costants.FILE_SEPARATOR+getEnvironment()+Costants.FILE_SEPARATOR);//+"home"+FILE_SEPARATOR+owner+FILE_SEPARATOR);
+    }
+
+
+    /**
+     * Constructor for versions < 2.0.0
+     * @param server backend server list
+     * @param id complete client id
+     * @param environment scope and root directory in the cluster
+     */
+    @Deprecated
+    public ServiceEngine(String[] server, String id, String environment, String scope, String owner){
+        this.primaryBackend=server;
+        setOwner(owner);
+        this.setEnvironment(environment);
+        setAccessType(scope);
+        this.setPublicArea(Costants.FILE_SEPARATOR+environment);
+        this.setHomeArea(Costants.FILE_SEPARATOR+environment);
+    }
+
+
+    public String getPublicArea() {
+        return publicArea;
+    }
+
+    public void setPublicArea(String publicArea) {
+        logger.trace("public area is "+publicArea);
+        this.publicArea = publicArea;
+    }
+
+    public String getHomeArea() {
+        return homeArea;
+    }
+
+    public void setHomeArea(String rootPath) {
+        this.homeArea = rootPath;
+    }
+
+    public String getEnvironment() {
+        return environment;
+    }
+
+    /**
+     * Set the remote root path
+     * @param environment the remote root path
+     */
+    public void setEnvironment(String environment) {
+// delete initial / from
variable environment + String newEnv=environment; + int ind=newEnv.indexOf('/'); + while(ind == 0){ + newEnv=environment.substring(1); + environment=newEnv; + ind=newEnv.indexOf('/'); + } + this.environment = newEnv; + } + + public String getBucketID() { + return bucketID; + } + + public void setBucketID(String bucketID) { + this.bucketID=bucketID; + + } + + /** + * download operation + * (non-Javadoc) + */ + @Override + public LocalResource get(){ + return get(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + /** + * + * @param backendType if specified it identifies the type of backend servers. eg. MongoDB, Ustore + * @return + */ + @Override + public LocalResource get(String backendType){ + file=null; + backendType=setBackendType(backendType); + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("download"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.DOWNLOAD); + return new LocalResource(file, this); + } + + + /** + * getSize operation: return the size of a remote file + */ + @Override + public RemoteResourceInfo getSize(){ + return getSize(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + /** + * @param backendType if specified it identifies the type of backend servers. eg. MongoDB, Ustore + */ + @Override + public RemoteResourceInfo getSize(String backendType){ + file=null; + backendType=setBackendType(backendType); + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getSize"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_SIZE); + return new RemoteResourceInfo(file, this); + } + + + @Override + public RemoteResourceComplexInfo getMetaFile(){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getMetaFile"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_META_FILE); + return new RemoteResourceComplexInfo(file, this); + } + + public String getTotalUserVolume(){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getTotalUserVolume"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_TOTAL_USER_VOLUME); + file = new Resource(file, this).setGenericProperties(getContext(), owner, null, "remote"); + file.setRemotePath("/"); + file.setOwner(owner); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + setMyFile(file); + service.setResource(getMyFile()); + Object info=null; + try { + if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null))) + info=(String)service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption()); + else{ + logger.error("parameters incompatible "); + } + + } catch (Throwable t) { + logger.error("get()", t.getCause()); + throw new RemoteBackendException(" Error in "+currentOperation+" operation ", t.getCause()); + } + if(info!=null) + return info.toString(); + else + return null; + } + + public String getUserTotalItems(){ + file=null; + if (logger.isDebugEnabled()) 
{ + logger.debug("get() - start"); + } + setCurrentOperation("getTotalUserItems"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_USER_TOTAL_ITEMS); + file = new Resource(file, this).setGenericProperties(getContext(), owner, "", "remote"); + file.setRemotePath("/"); + file.setOwner(owner); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + setMyFile(file); + service.setResource(getMyFile()); + Object info=null; + try { + if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null))) + info=(String)service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption()); + else{ + logger.error("parameters incompatible "); + } + + } catch (Throwable t) { + logger.error("get()", t.getCause()); + throw new RemoteBackendException(" Error in "+currentOperation+" operation ", t.getCause()); + } + if(info!=null) + return info.toString(); + else + return null; + } + + public RemoteResourceFolderInfo getFolderTotalVolume(){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getFolderSize"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_FOLDER_TOTAL_VOLUME); + return new RemoteResourceFolderInfo(file, this); + } + + public RemoteResourceFolderInfo getFolderTotalItems(){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getFolderCount"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_FOLDER_TOTAL_ITEMS); + return new RemoteResourceFolderInfo(file, this); + } + + public RemoteResourceFolderInfo getFolderLastUpdate(){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("getFolderLastUpdate"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.GET_FOLDER_LAST_UPDATE); + return new RemoteResourceFolderInfo(file, this); + } + + + /** + * upload operation + * path nome bucket + * @param replace true if the remote file will be replaced + * @return LocalResource object + */ + @Override + public LocalResource put(boolean replace){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("put() - start"); + } + setCurrentOperation("upload"); + setReplaceOption(replace); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.UPLOAD); + file.setReplaceOption(replace); + return new LocalResource(file, this); + } + + + /** + * upload operation + * path nome bucket + * @param replace true if the remote file will be replaced + * @param mimeType: the file mimeType + * @return LocalResource object + */ + @Override + public LocalResource put(boolean replace, String mimeType){ + file=null; + if (logger.isDebugEnabled()) { + logger.debug("put() - start"); + } + setCurrentOperation("upload"); + setReplaceOption(replace); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), 
backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.UPLOAD); + file=setMimeType(file, mimeType); + file.setReplaceOption(replace); + return new LocalResource(file, this); + } + + /** + * remove operation + * @return RemoteResource object + */ + @Override + public RemoteResource remove(){ + return remove(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + + @Override + public RemoteResource remove(String backendType){ + backendType=setBackendType(backendType); + file=new MyFile(getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + // remove object operation + setCurrentOperation("remove"); + file=setOperationInfo(file, OPERATION.REMOVE); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new RemoteResource(file, this); + } + + + public MyFile getMyFile() { + return file; + } + + public void setMyFile(MyFile myFile) { + this.file = myFile; + } + + public String getBucket() { + return bucket; + } + + public void setBucket(String bucket) { + this.bucket = bucket; + } + + /** + * show the content of the remote directory + */ + @Override + public RemoteResource showDir(){ + return showDir(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + + /** + * show the content of the remote directory + */ + @Override + public RemoteResource showDir(String backendType){ + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + setCurrentOperation("showDir"); + file=setOperationInfo(file, OPERATION.SHOW_DIR); + return new RemoteResource(file, this); + } + + + + /** + * @param mapDirs + * @return + */ + List addObjectsDirBucket(Map mapDirs) { + List dirs; + Set dirsKeys=mapDirs.keySet(); + dirs= new ArrayList (dirsKeys.size()); + for(java.util.Iterator it=dirsKeys.iterator();it.hasNext(); ){ + String key=it.next(); + key =new BucketCoding().bucketDirDecoding(key, getContext()); + logger.debug("add "+key); + dirs.add(mapDirs.get(key)); + } + return dirs; + } + + @Override + public RemoteResource removeDir(){ + return removeDir(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResource removeDir(String backendType){ + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + setCurrentOperation("removedir"); + file=setOperationInfo(file, OPERATION.REMOVE_DIR); + return new RemoteResource(file, this); + } + + + @Override + public RemoteResource getUrl(){ + return getUrl(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResource getUrl(boolean forceCreation){ + return getUrl(Costants.DEFAULT_TRANSPORT_MANAGER, forceCreation); + } + + + @Override + public RemoteResource getUrl(String backendType){ + return getUrl(backendType, false); + } + + + @Override + public RemoteResource getUrl(String backendType, boolean forceCreation){ + 
backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + file.setResolverHost(getResolverHost()); + file.forceCreation(forceCreation); + file.setPassPhrase(passPhrase); + setCurrentOperation("getUrl"); + file=setOperationInfo(file, OPERATION.GET_URL); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + RemoteResource resource=new RemoteResource(file, this); + return resource; + } + + @Override + public RemoteResource getHttpUrl(){ + return getHttpUrl(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResource getHttpUrl(boolean forceCreation){ + return getHttpUrl(Costants.DEFAULT_TRANSPORT_MANAGER, forceCreation); + } + + @Override + public RemoteResource getHttpUrl(String backendType){ + return getHttpUrl(backendType, false); + } + + + @Override + public RemoteResource getHttpUrl(String backendType, boolean forceCreation){ + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + file.setResolverHost(getResolverHost()); + file.forceCreation(forceCreation); + file.setPassPhrase(passPhrase); + setCurrentOperation("getHttpUrl"); + file=setOperationInfo(file, OPERATION.GET_HTTP_URL); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + RemoteResource resource=new RemoteResource(file, this); + return resource; + } + + /*HTTPS URL BEGIN*/ + + @Override + public RemoteResource getHttpsUrl(){ + return getHttpsUrl(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResource getHttpsUrl(boolean forceCreation){ + return getHttpsUrl(Costants.DEFAULT_TRANSPORT_MANAGER, forceCreation); + } + + @Override + public RemoteResource getHttpsUrl(String backendType){ + return getHttpsUrl(backendType, false); + } + + + @Override + public RemoteResource getHttpsUrl(String backendType, boolean forceCreation){ + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGcubeAccessType(this.getGcubeAccessType()); + file.setGcubeScope(this.getGcubeScope()); + file.setOwnerGcube(this.getOwnerGcube()); + file.setServiceName(this.getServiceName()); + file.setServiceClass(this.getServiceClass()); + file.setResolverHost(getResolverHost()); + file.forceCreation(forceCreation); + file.setPassPhrase(passPhrase); + setCurrentOperation("getHttpsUrl"); + file=setOperationInfo(file, OPERATION.GET_HTTPS_URL); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + RemoteResource resource=new RemoteResource(file, this); + return resource; + } + + /*HTTPS URL END*/ + + + + /** + * + * @return private o public + */ + public String getContext(){ + if(isPublic()){ + return getPublicArea(); + } + return getHomeArea(); + } + + + public boolean isPublic(){ + if(getScope()!=null) + return getScope().equalsIgnoreCase("public"); + return Costants.DEFAULT_SCOPE.equalsIgnoreCase("public"); + } + + public String getScope() { 
+ return accessType.toString(); + } + +// public void setScope(String scope) { +// if(scope.equalsIgnoreCase("public") || scope.equalsIgnoreCase("private") || scope.equalsIgnoreCase("group")) +// this.accessType = scope; +// else +// throw new IllegalArgumentException("bad scope usage: public | group | private "); +// } + + + public String getCurrentOperation() { + return currentOperation; + } + + public void setCurrentOperation(String currentOperation) { + this.currentOperation = currentOperation; + } + + public boolean isReplaceOption() { + return replaceOption; + } + + public void setReplaceOption(boolean replaceOption) { + this.replaceOption = replaceOption; + } + + + public AmbiguousResource lock() { + return lock(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + public AmbiguousResource lock(String backendType) { + backendType=setBackendType(backendType); + file = new MyFile(true); + setCurrentOperation("lock"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.LOCK); + return new AmbiguousResource(file, this); + } + + + @Override + public AmbiguousResource unlock(String key) { + return unlock(key, Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public AmbiguousResource unlock(String key, String backendType) { + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setLockedKey(key); +// put(true); + setCurrentOperation("unlock"); + file=setOperationInfo(file, OPERATION.UNLOCK); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new AmbiguousResource(file, this); + } + + + @Override + public RemoteResourceInfo getTTL() { + return getTTL(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResourceInfo getTTL(String backendType) { + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); +// put(true); + setCurrentOperation("getTTL"); + file=setOperationInfo(file, OPERATION.GET_TTL); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new RemoteResourceInfo(file, this); + } + + + @Override + public RemoteResource getMetaInfo(String field) { + return getMetaInfo(field, Costants.DEFAULT_TRANSPORT_MANAGER); + + } + + @Override + public RemoteResource getMetaInfo(String field, String backendType) { + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGenericPropertyField(field); + setCurrentOperation("getMetaInfo"); + file=setOperationInfo(file, OPERATION.GET_META_INFO); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new RemoteResource(file, this); + } + + @Override + public RemoteResource setMetaInfo(String field, String value) { + return setMetaInfo(field, value, Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResource setMetaInfo(String field, String value, String backendType) { + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setGenericPropertyField(field); + file.setGenericPropertyValue(value); + setCurrentOperation("setMetaInfo"); + file=setOperationInfo(file, OPERATION.SET_META_INFO); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return 
new RemoteResource(file, this); + } + + @Override + public RemoteResourceInfo renewTTL(String key) { + return renewTTL(key, Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResourceInfo renewTTL(String key, String backendType) { + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); + file.setLockedKey(key); +// put(true); + setCurrentOperation("renewTTL"); + file=setOperationInfo(file, OPERATION.RENEW_TTL); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new RemoteResourceInfo(file, this); + } + + + + @Override + public RemoteResourceSource linkFile() { + return linkFile(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResourceSource linkFile(String backendType) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("link"); + file=setOperationInfo(file, OPERATION.LINK); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + } + + + + @Override + public RemoteResourceSource copyFile() { + return copyFile(Costants.DEFAULT_TRANSPORT_MANAGER, Costants.DEFAULT_REPLACE_OPTION); + } + + + @Override + public RemoteResourceSource copyFile(String backendType) { + return copyFile(backendType, Costants.DEFAULT_REPLACE_OPTION); + } + + @Override + public RemoteResourceSource copyFile(boolean replaceOption) { + return copyFile(Costants.DEFAULT_TRANSPORT_MANAGER, replaceOption); + } + + @Override + public RemoteResourceSource copyFile(String backendType, boolean replaceOption) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("copy"); + file=setOperationInfo(file, OPERATION.COPY); + file.setReplaceOption(replaceOption); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + } + + + @Override + public RemoteResource duplicateFile() { + return duplicateFile(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + + @Override + public RemoteResource duplicateFile(String backendType) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("duplicate"); + file=setOperationInfo(file, OPERATION.DUPLICATE); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResource(file, this); + } + + @Override + public RemoteResourceSource softCopy() { + return softCopy(Costants.DEFAULT_TRANSPORT_MANAGER, Costants.DEFAULT_REPLACE_OPTION); + } + + + @Override + public RemoteResourceSource softCopy(String backendType) { + return softCopy(backendType, Costants.DEFAULT_REPLACE_OPTION); + } + + @Override + public RemoteResourceSource softCopy(boolean replaceOption) { + return softCopy(Costants.DEFAULT_TRANSPORT_MANAGER, replaceOption); + } + + @Override + public RemoteResourceSource softCopy(String backendType, boolean replaceOption) { + file=null; + setCurrentOperation("softcopy"); + file=setOperationInfo(file, OPERATION.SOFT_COPY); + file.setReplaceOption(replaceOption); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + + } + + + + + @Override + public RemoteResourceSource moveFile() { + return 
moveFile(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + @Override + public RemoteResourceSource moveFile(String backendType) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("move"); + file=setOperationInfo(file, OPERATION.MOVE); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + } + + + @Override + public RemoteResourceSource copyDir() { + return copyDir(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + + @Override + public RemoteResourceSource copyDir(String backendType) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("copy_dir"); + file=setOperationInfo(file, OPERATION.COPY_DIR); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + } + + + @Override + public RemoteResourceSource moveDir() { + return moveDir(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + + @Override + public RemoteResourceSource moveDir(String backendType) { + backendType=setBackendType(backendType); + file=null; + setCurrentOperation("move_dir"); + file=setOperationInfo(file, OPERATION.MOVE_DIR); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames()); + return new RemoteResourceSource(file, this); + } + + @Override + public void close(){ + currentOperation="close"; + file.setOwner(owner); + getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH); + setMyFile(file); + service.setResource(getMyFile()); + service.setTypeOperation("close"); + try { + if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null))) + service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption()); + else{ + logger.error("parameters incompatible "); + } + + } catch (Throwable t) { + logger.error("get()", t.getCause()); + throw new RemoteBackendException(" Error in "+currentOperation+" operation ", t.getCause()); + } + } + + + + public String getServiceClass() { + return serviceClass; + } + + public void setServiceClass(String serviceClass) { + this.serviceClass = serviceClass; + } + + public String getServiceName() { + return serviceName; + } + + public void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + + public String getOwnerGcube() { + return ownerGcube; + } + + public void setOwnerGcube(String ownerGcube) { + this.ownerGcube = ownerGcube; + } + + public String getGcubeScope() { + return gcubeScope; + } + + public void setGcubeScope(String gcubeScope) { + this.gcubeScope = gcubeScope; + } + + public AccessType getGcubeAccessType() { + return gcubeAccessType; + } + + public void setGcubeAccessType(String gcubeAccessType) { + if(gcubeAccessType.equals(AccessType.PUBLIC.toString())){ + this.gcubeAccessType=AccessType.PUBLIC; + }else if(gcubeAccessType.equals(AccessType.SHARED.toString())){ + this.gcubeAccessType=AccessType.SHARED; + }else if(gcubeAccessType.equals(AccessType.PRIVATE.toString())){ +// the shared scope is a private scope. 
+ this.gcubeAccessType=AccessType.PRIVATE; + }else{ + throw new RuntimeException("invalid AccessType"); + } +// this.gcubeAccessType = gcubeAccessType; + } + + public MemoryType getGcubeMemoryType() { + return gcubeMemoryType; + } + + public void setGcubeMemoryType(String gcubeMemoryType) { + if(gcubeMemoryType.equals(MemoryType.PERSISTENT.toString())){ + this.gcubeMemoryType=MemoryType.PERSISTENT; + }else if(gcubeMemoryType.equals(MemoryType.VOLATILE.toString())){ + this.gcubeMemoryType=MemoryType.VOLATILE; + }else if(gcubeMemoryType.equals(MemoryType.BOTH.toString())){ +// the shared scope is a private scope. + this.gcubeMemoryType=MemoryType.BOTH; + }else{ + throw new RuntimeException("invalid MemoryType"); + } +// this.gcubeMemoryType = gcubeMemoryType; + } + + private MyFile setOperationInfo(MyFile file, OPERATION op) { + if(file==null) + file=new MyFile(this.getGcubeMemoryType()); + file.setOperation(op); + if(getWriteConcern() != null) + file.setWriteConcern(getWriteConcern()); + if(getReadConcern() != null) + file.setReadPreference(getReadConcern()); + return file; + } + + private MyFile setMimeType(MyFile file, String mime) { + if(file==null) + file=new MyFile(this.getGcubeMemoryType()); + file.setMimeType(mime); + return file; + } + + public String setBackendType(String backendType) { + if(backendType!=null) + this.backendType=backendType; + return this.backendType; + } + + public String getBackendType(){ + return backendType; + } + + public String getBackendUser(){ + return this.user; + } + + public void setBackendUser(String user) { + if(user!=null) + this.user=user; + + } + + public String getBackendPassword(){ + return this.password; + } + + public void setBackendPassword(String password) { + if(password != null) + this.password=password; + + } + + public void setResolverHost(String resolverHost) { + this.resolverHost=resolverHost; + + } + + public String getResolverHost(){ + if (resolverHost != null) + return resolverHost; + return Costants.DEFAULT_RESOLVER_HOST; + } + + public String getPassPhrase() { + return passPhrase; + } + + public void setPassPhrase(String passPhrase) { + this.passPhrase = passPhrase; + } + + + public String getId(String id){ + if(ObjectId.isValid(id)) + return id; + try { + if(Base64.isBase64(id)){ + byte[] valueDecoded= Base64.decodeBase64(id); + String encryptedID = new String(valueDecoded); + return new Encrypter("DES", getPassPhrase()).decrypt(encryptedID); + }else{ + return new Encrypter("DES", getPassPhrase()).decrypt(id); + } + } catch (EncryptionException e) { + e.printStackTrace(); + } + return null; + } + + public RemoteResource getRemotePath(){ + backendType=setBackendType(backendType); + file=new MyFile(this.getGcubeMemoryType()); +// put(true); + setCurrentOperation("getRemotePath"); + file=setOperationInfo(file, OPERATION.GET_REMOTE_PATH); + file.setRootPath(this.getPublicArea()); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + return new RemoteResource(file, this); + } + + public String getWriteConcern() { + return write; + } + + public void setWriteConcern(String write) { + this.write = write; + } + + public String getReadConcern() { + return read; + } + + public void setReadConcern(String read) { + this.read = read; + } + + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + if (owner == null) + throw new RuntimeException("The owner cannot be null"); + this.owner = owner; + } + + private void setAccessType(String 
accessType) { + if(accessType.equals(AccessType.PUBLIC.toString())){ + this.accessType=AccessType.PUBLIC; + }else{ +// the shared scope is a private scope. + this.accessType=AccessType.PRIVATE; + } + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.IClient#setDbNames(java.lang.String[]) + */ + @Override + public void setDbNames(String[] dbs) { + this.dbNames=dbs; + + } + + protected String[] getDbNames(){ + return this.dbNames; + } + + /** + * getSize operation: return the size of a remote file + */ + @Override + public RemoteResourceBoolean exist(){ + return exist(Costants.DEFAULT_TRANSPORT_MANAGER); + } + + /** + * @param backendType if specified it identifies the type of backend servers. eg. MongoDB, Ustore + */ + @Override + public RemoteResourceBoolean exist(String backendType){ + file=null; + backendType=setBackendType(backendType); + if (logger.isDebugEnabled()) { + logger.debug("get() - start"); + } + setCurrentOperation("exist"); + this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames()); + file=setOperationInfo(file, OPERATION.EXIST); + return new RemoteResourceBoolean(file, this); + } + +} \ No newline at end of file diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkConsumer.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkConsumer.java new file mode 100644 index 0000000..bd8b631 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkConsumer.java @@ -0,0 +1,146 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Upload the chunks in a concurrent safe mode is used only for terrastore + * @author rcirillo + * + */ +public class ChunkConsumer implements Runnable { + /** + * Logger for this class + */ + final Logger logger = LoggerFactory.getLogger(ChunkConsumer.class); + private Monitor monitor; + private int id; + private String[] server; + private String user; + private String password; + private static String bucketName; + + boolean isChunk=false; + String[] dbNames; + public static ThreadLocal client=new ThreadLocal(); + public static ThreadLocal resource=new ThreadLocal(); + private boolean replaceOpt; + Thread producer; + + public void run(){ + if (logger.isDebugEnabled()) { + logger.debug("run() - start"); + } + MyFile request = null; + synchronized (ChunkConsumer.class) { + request=monitor.getRequest(); + resource.set(request); + } + // ... actions for manage the requests ... 
+ connection(resource.get()); + if (logger.isDebugEnabled()) { + logger.debug("run() - end"); + } + } + + private void connection(MyFile richiesta) { + if (logger.isDebugEnabled()) { + logger.debug("connection(MyFile) - start"); + } + try{ + if (logger.isDebugEnabled()) { + logger.debug("connection(MyFile) - request fetched: " + + resource.get().getKey() + + " current Thread: " + + Thread.currentThread()); + } + putInTerrastore(resource.get()); + }catch(Exception e){ + logger.warn("connection(MyFile)- upload"+ e.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("connection(MyFile) - retry PUT"); + } + connection(resource.get()); + } + if (logger.isDebugEnabled()) { + logger.debug("connection(MyFile) - end"); + } + } + + public ChunkConsumer(Monitor monitor, int id, String[] server, String user, String password, String[] dbNames, boolean isChunk, String bucket, boolean replaceOption){ + this.monitor = monitor; + this.id = id; + this.server=server; + this.user=user; + this.password=password; + bucketName=bucket; + this.isChunk=isChunk; + this.dbNames=dbNames; + this.replaceOpt=replaceOption; + } + + private String[] randomizeServer(String[] server) { + int len=server.length; + if(logger.isDebugEnabled()) + logger.debug("array server length: "+len); + int n = (int)(Math.random()*10); + if(logger.isDebugEnabled()) + logger.debug("random number: "+n); + int start=0; + if(n>0){ + start=len%n; + if(start>0) + start--; + if(logger.isDebugEnabled()) + logger.debug("start index: "+start); + String temp=server[0]; + server[0]=server[start]; + server[start]=temp; + } + if(logger.isDebugEnabled()) + logger.debug("Server 0: "+server[0]); + return server; + } + + private void putInTerrastore(MyFile myFile) { + if (logger.isDebugEnabled()) { + logger.debug("putInTerrastore(MyFile) - start"); + } + long start=0; + if(client.get()==null){ + start=System.currentTimeMillis(); + synchronized(ChunkConsumer.class){ + String [] randomServer=randomizeServer(server); + TransportManagerFactory tmf=new TransportManagerFactory(randomServer, null, null); + client.set(tmf.getTransport(Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference())); + } + if(logger.isDebugEnabled()){ + logger.debug("waiting time for upload: " + + (System.currentTimeMillis() - start) + " su: " + + resource.get().getKey()); + } + } + start=System.currentTimeMillis(); + try{ +// client.get().put(resource.get(), bucketName, resource.get().getKey(), replaceOpt); + UploadOperator upload=new UploadOperator(server, user, password, bucketName, monitor, isChunk , null, dbNames); + client.get().put(upload); + }catch(Exception e){ + logger.error("ERROR IN CLUSTER CONNECTION ", e); + monitor.putRequest(resource.get()); + } + if(logger.isDebugEnabled()){ + logger.debug("Time for upload: " + + (System.currentTimeMillis() - start) + " on: " + + resource.get().getKey()); + } + if (logger.isDebugEnabled()) { + logger.debug("putInTerrastore(MyFile) - end"); + } + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkOptimization.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkOptimization.java new file mode 100644 index 0000000..f86aead --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/ChunkOptimization.java @@ -0,0 +1,49 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; 
+import org.slf4j.LoggerFactory; + +/** + * Implements a simple algorithm for calculating the size of the chunk + * @author Roberto Cirillo (ISTI-CNR) + * + */ + +public class ChunkOptimization { + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(ChunkOptimization.class); + private long fileSize; + + public ChunkOptimization(long dimensioneFile){ + if (logger.isDebugEnabled()) { + logger.debug("ChunkOptimization(long) - Dimensione del file: " + + dimensioneFile); + } + this.fileSize=dimensioneFile; + } + + public int chunkCalculation(){ + long chunkSize=0; + if(fileSize<= Costants.sogliaDimensioneMinima){ + chunkSize=fileSize; + }else{ + // numero chunk + for(int i=Costants.sogliaNumeroMinimo; i Costants.sogliaDimensioneMinima)){ + break; + }else if(chunkSize ids=null; + try { +// ids=tm.copyDir(myFile, sourcePath, destinationPath); + ids=tm.copyDir(this); + } catch (UnknownHostException e) { + tm.close(); + logger.error("Problem in copyDir from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage()); + throw new RemoteBackendException(" Error in copyDir operation ", e.getCause()); + } + return ids.toString(); + } + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { +// DirectoryBucket dirBuc=new DirectoryBucket(server, user, password, remotePath, author); +// For terrastore, the name of bucket is formed: path_____fileName_____author +// String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea); + this.sourcePath=resource.getLocalPath(); + this.destinationPath=resource.getRemotePath(); + sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea); + setResource(resource); + return bucket=destinationPath; + } + + public abstract List execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException; + + public String getSourcePath() { + return sourcePath; + } + + public void setSourcePath(String sourcePath) { + this.sourcePath = sourcePath; + } + + public String getDestinationPath() { + return destinationPath; + } + + public void setDestinationPath(String destinationPath) { + this.destinationPath = destinationPath; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Download.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Download.java new file mode 100644 index 0000000..c90d26e --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Download.java @@ -0,0 +1,111 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.IOException; +import 
java.io.OutputStream; + +/** + * Implements a download operation from the cluster: download a file object + * + *@author Roberto Cirillo (ISTI - CNR) + */ + +public abstract class Download extends Operation{ + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Download.class); + final Logger logger=LoggerFactory.getLogger(Download.class); + protected String localPath; + protected String remotePath; + protected OutputStream os; + protected MyFile resource; + + public Download(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + this.localPath=file.getLocalPath(); + this.remotePath=remotePath; + setResource(file); + return getRemoteIdentifier(remotePath, rootArea); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + String id=null; + if (logger.isDebugEnabled()) { + logger.debug(" DOWNLOAD " + myFile.getRemotePath() + + " in bucket: " + getBucket()); + } + try { + id=get(this, myFile, false); + + } catch (Throwable e) { + TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword()); + TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference()); + tm.close(); + logger.error("Problem in download from: "+myFile.getRemotePath()+": "+e.getMessage()); +// e.printStackTrace(); + throw new RemoteBackendException(" Problem in download operation ", e.getCause()); + } + return id; + } + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { +// DirectoryBucket dirBuc=new DirectoryBucket(server, getUser(), getPassword(), remotePath, author); +// For terrastore, the name of bucket is formed: path_____fileName_____author + String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea); +// DirectoryEntity dirObject=null; + this.os=resource.getOutputStream(); + setBucket(bucketName); + return bucketName; + } + + public abstract ObjectId execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException; + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + public String getLocalPath() { + return localPath; + } + + public void setLocalPath(String localPath) { + this.localPath = localPath; + } + + public String getRemotePath() { + return remotePath; + } + + public void setRemotePath(String remotePath) { + this.remotePath = remotePath; + } + + public OutputStream getOs() { + return os; + } + + public void setOs(OutputStream os) { + this.os = os; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DownloadAndLock.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DownloadAndLock.java new file mode 100644 index 0000000..7626800 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DownloadAndLock.java @@ -0,0 +1,67 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.OutputStream; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import 
org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.operation.DownloadOperator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DownloadAndLock extends Operation { + + final Logger logger=LoggerFactory.getLogger(Download.class); + private String localPath; + private String remotePath; + private OutputStream os; +/** + * @deprecated + * @param server + * @param bucket + * @param monitor + * @param isChunk + * + */ + public DownloadAndLock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + // TODO Auto-generated constructor stub + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + @Override + public String doIt(MyFile myFile) throws RemoteBackendException { + if (logger.isDebugEnabled()) { + logger.debug(" DOWNLOAD " + myFile.getRemotePath() + + " in bucket: " + getBucket()); + } + Download download = new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames()); + try { + //TODO add field for file lock + get(download,myFile, true); + } catch (Exception e) { + TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword()); + TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference()); + tm.close(); + throw new RemoteBackendException(" Error in downloadAndLock operation ", e.getCause()); + } + return null; + } + + @Override + public String initOperation(MyFile file, String RemotePath, + String author, String[] server, String rootArea, + boolean replaceOption) { + this.localPath=file.getLocalPath(); + this.remotePath=remotePath; + return getRemoteIdentifier(remotePath, rootArea); + } + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DuplicateFile.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DuplicateFile.java new file mode 100644 index 0000000..dadf6dc --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/DuplicateFile.java @@ -0,0 +1,93 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public abstract class DuplicateFile extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(DuplicateFile.class); + protected String 
sourcePath; + protected MyFile resource; + + public DuplicateFile(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String id=null; + try { +// id = tm.duplicateFile(myFile, bucket); + id = tm.duplicateFile(this); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return id; + } + + @Override + public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) { + if(remotePath != null){ + boolean isId=ObjectId.isValid(remotePath); + setResource(file); + if(!isId){ +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + }else throw new RemoteBackendException("argument cannot be null"); + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation"); + } + + public abstract String execute(MongoIOManager mongoPrimaryInstance); + + public String getSourcePath() { + return sourcePath; + } + + public void setSourcePath(String sourcePath) { + this.sourcePath = sourcePath; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Exist.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Exist.java new file mode 100644 index 0000000..8f4380b --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Exist.java @@ -0,0 +1,72 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Implements a Exist operation: check if a given object exist + * @author Roberto Cirillo (ISTI - CNR) 2018 + * + */ + +public class Exist extends Operation{ + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(Exist.class); + + public Exist(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws 
RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + boolean isPresent=false; + try { + isPresent = tm.exist(bucket); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in Exist operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return isPresent+""; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with Exist operation"); + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/FileWriter.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/FileWriter.java new file mode 100644 index 0000000..5341488 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/FileWriter.java @@ -0,0 +1,99 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +//import org.apache.log4j.Logger; +//import org.gcube.common.core.utils.logging.GCUBELog; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.io.OutputStream; + +/** + * A thread that write the chunk in a output stream specified + * + *@author Roberto Cirillo (ISTI - CNR) + * + */ + +public class FileWriter extends Thread{ + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(FileWriter.class); + final Logger logger=LoggerFactory.getLogger(FileWriter.class); + private Monitor monitor; + private int id; +// private MyFile myFile; +// private byte[] encode; +// private int offset; +// private static int len=0; + private OutputStream out; +// private String path; + private byte[] full; + + + public synchronized void run(){ + if (logger.isDebugEnabled()) { + logger.debug("run() - start"); + } + MyFile request = monitor.getRequest(); + synchronized (FileWriter.class) { + if(logger.isDebugEnabled()){ + logger.debug("recover request: "+request.getKey()+" length: "+request.getContent().length); + } + try { + decodeByte2File(request.getContent()); + out.flush(); + } catch (Exception e) { + logger.error("run()", e); + } + } + if (logger.isDebugEnabled()) { + logger.debug("run() - end"); + } + } + + public FileWriter(Monitor monitor, OutputStream out, byte[] fullEncode){ + this.monitor=monitor; + this.out=out; + this.full=fullEncode; + } + + public FileWriter(Monitor monitor, OutputStream out){ + this.monitor = monitor; + this.out=out; + } + + public FileWriter(Monitor monitor, int id){ + this.monitor = monitor; + this.id = id; + } + + public void decodeByte2File(byte[] encode, int offset, int len){ + try { + out.write(encode, offset, len); + if(logger.isDebugEnabled()) + 
logger.debug("write from pos:"+offset+" to pos: "+len); + } catch (IOException e) { + logger.error("decodeByte2File(byte[], int, int)", e); + } + if(logger.isDebugEnabled()) + logger.debug("New file created!"); + } + + public void decodeByte2File(byte[] encode){ + if (logger.isDebugEnabled()) { + logger.debug("decodeByte2File(byte[]) - start"); + logger.debug("encode.length: "+encode.length); + } + try { + out.write(encode); + } catch (Exception e) { + logger.error("scrittura chunk non riuscita!!"); + logger.error("decodeByte2File(byte[])", e); + } + if (logger.isDebugEnabled()) { + logger.debug("decodeByte2File(byte[]) - end"); + } + } +} \ No newline at end of file diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderCount.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderCount.java new file mode 100644 index 0000000..2eb4d83 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderCount.java @@ -0,0 +1,64 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetFolderCount extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetSize.class); + + public GetFolderCount(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType,dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + long dim=0; + try { + dim = tm.getFolderTotalItems(bucket); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in getFolderTotalItems operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return ""+dim; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + BucketCoding bc=new BucketCoding(); + buck=bc.bucketFileCoding(remotePath, rootArea); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + //remove directory bucket + DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author); + dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames); + } + return bucket=buck; + } + + + @Override + public String initOperation(MyFile resource, String 
RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation"); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderLastUpdate.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderLastUpdate.java new file mode 100644 index 0000000..a9cb5c9 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderLastUpdate.java @@ -0,0 +1,52 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetFolderLastUpdate extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetSize.class); + + public GetFolderLastUpdate(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + return null; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + BucketCoding bc=new BucketCoding(); + buck=bc.bucketFileCoding(remotePath, rootArea); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + //remove directory bucket + DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author); + dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames); + } + return bucket=buck; + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation"); + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderSize.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderSize.java new file mode 100644 index 0000000..4ce2fab --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetFolderSize.java @@ -0,0 +1,65 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import 
org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetFolderSize extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetSize.class); + + public GetFolderSize(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + long dim=0; + try { + dim = tm.getFolderTotalVolume(bucket); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in getFolderTotalVolume operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return ""+dim; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + BucketCoding bc=new BucketCoding(); + buck=bc.bucketFileCoding(remotePath, rootArea); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + //remove directory bucket + DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author); + dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames); +// String bucketName=null; + } + return bucket=buck; + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation"); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpUrl.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpUrl.java new file mode 100644 index 0000000..0a38869 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpUrl.java @@ -0,0 +1,103 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.IOException; +import java.net.URL; +import org.apache.commons.codec.binary.Base64; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; + +public class GetHttpUrl extends Operation { + +// private OutputStream os; + TransportManager tm; + + public GetHttpUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + 
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + @Override + public String initOperation(MyFile file, String remotePath, String author, + String[] server, String rootArea, boolean replaceOption) { + return getRemoteIdentifier(remotePath, rootArea); + } + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + + + @Override + public Object doIt(MyFile myFile) throws RemoteBackendException { + String resolverHost=myFile.getResolverHOst(); + String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR; + String urlParam=""; + try { + String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference()); + String phrase=myFile.getPassPhrase(); +// urlParam =new StringEncrypter("DES", phrase).encrypt(id); + urlParam = new Encrypter("DES", phrase).encrypt(id); +// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8"); + } catch (EncryptionException e) { + throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause()); + } + logger.info("URL generated: "+urlBase+urlParam); + String smpUrl=urlBase+urlParam; + logger.info("URL generated: "+smpUrl); + smpUrl=smpUrl.replace("smp://", "http://"); + URL httpUrl=null; + try { + httpUrl=translate(new URL(smpUrl)); + } catch (IOException e) { + e.printStackTrace(); + } + logger.info("URL translated: "+httpUrl); + if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){ + return httpUrl.toString()+Costants.VOLATILE_URL_IDENTIFICATOR; + } + return httpUrl.toString(); + } + + + private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){ + String id=null; + if(tm ==null){ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference); + } + try { + id = tm.getId(bucket, forceCreation); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetUrl operation. 
Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return id; + } + + private URL translate(URL url) throws IOException { + logger.debug("translating: "+url); + String urlString=url.toString(); + String baseUrl="http://"+url.getHost()+"/"; + logger.debug("base Url extracted is: "+baseUrl); +// int index=urlString.lastIndexOf(".org/"); + String params = urlString.substring(baseUrl.length()); + logger.debug("get params: "+baseUrl+" "+params); + //encode params + params=Base64.encodeBase64URLSafeString(params.getBytes("UTF-8")); +// URLEncoder.encode(params, "UTF-8"); + // merge string + urlString=baseUrl+params; + logger.info("uri translated in http url: "+urlString); + return new URL(urlString); + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpsUrl.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpsUrl.java new file mode 100644 index 0000000..1dcaa0d --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetHttpsUrl.java @@ -0,0 +1,103 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.IOException; +import java.net.URL; + +import org.apache.commons.codec.binary.Base64; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; + +public class GetHttpsUrl extends Operation { + +// private OutputStream os; + TransportManager tm; + + public GetHttpsUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + @Override + public String initOperation(MyFile file, String remotePath, String author, + String[] server, String rootArea, boolean replaceOption) { + return getRemoteIdentifier(remotePath, rootArea); + } + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + + + @Override + public Object doIt(MyFile myFile) throws RemoteBackendException { + String resolverHost=myFile.getResolverHOst(); + String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR; + String urlParam=""; + try { + String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference()); + String phrase=myFile.getPassPhrase(); +// urlParam =new StringEncrypter("DES", phrase).encrypt(id); + urlParam = new Encrypter("DES", phrase).encrypt(id); +// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8"); + } catch (EncryptionException e) { + throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause()); + } + logger.info("URL generated: "+urlBase+urlParam); + String 
smpUrl=urlBase+urlParam; + logger.info("URL generated: "+smpUrl); + smpUrl=smpUrl.replace("smp://", "https://"); + URL httpsUrl=null; + try { + httpsUrl=translate(new URL(smpUrl)); + } catch (IOException e) { + logger.error("URL translation failed: "+smpUrl, e); + throw new RemoteBackendException(" Error in getHttpsUrl operation: URL translation failed ", e.getCause()); + } + logger.info("URL translated: "+httpsUrl); + if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){ + return httpsUrl.toString()+Costants.VOLATILE_URL_IDENTIFICATOR; + } + return httpsUrl.toString(); + } + + private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){ + String id=null; + if(tm ==null){ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference); + } + try { + id = tm.getId(bucket, forceCreation); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return id; + } + + private URL translate(URL url) throws IOException { + logger.debug("translating: "+url); + String urlString=url.toString(); + String baseUrl="https://"+url.getHost()+"/"; + logger.debug("base Url extracted is: "+baseUrl); +// int index=urlString.lastIndexOf(".org/"); + String params = urlString.substring(baseUrl.length()); + logger.debug("get params: "+baseUrl+" "+params); + //encode params + params=Base64.encodeBase64URLSafeString(params.getBytes("UTF-8")); +// URLEncoder.encode(params, "UTF-8"); + // merge string + urlString=baseUrl+params; + logger.info("uri translated in https url: "+urlString); + return new URL(urlString); + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaFile.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaFile.java new file mode 100644 index 0000000..10a1710 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaFile.java @@ -0,0 +1,86 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetMetaFile extends Operation{ + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetMetaFile.class); + + public GetMetaFile(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + /** + * Only the following values will be returned: + * mimeType, + * owner, + * id, + * name, + * remotePath, + * size. 
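+ * Hypothetical usage sketch (names assumed for illustration, assuming the usual MyFile getters): + * MyFile meta = getMetaFileOp.doIt(request); + * long size = meta.getSize(); // id, mime type and owner via the corresponding getters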
+ * + */ + public MyFile doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + long dim=0; + String id=null; + String mime=null; + try { + dim = tm.getSize(bucket); + id=tm.getId(bucket, false); + mime=tm.getFileProperty(bucket, "mimetype"); + myFile.setOwner(tm.getFileProperty(bucket, "owner")); + if(tm.isValidId(bucket)){ + myFile.setRemotePath(tm.getFileProperty(bucket, "filename")); + myFile.setAbsoluteRemotePath(tm.getFileProperty(bucket, "filename")); + myFile.setName(tm.getFileProperty(bucket, "name")); + } + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetMetaFile operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + myFile.setSize(dim); + myFile.setId(id); + myFile.setMimeType(mime); + + return myFile; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getMetaFile operation"); + } + +} + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaInfo.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaInfo.java new file mode 100644 index 0000000..f459029 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetMetaInfo.java @@ -0,0 +1,62 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetMetaInfo extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetMetaInfo.class); + + public GetMetaInfo(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String value=null; + try { + value=tm.getFileProperty(bucket, myFile.getGenericPropertyField()); + } catch (Exception e) { + tm.close(); + throw new 
RemoteBackendException(" Error in GetMetaFile operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + + return value; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("method not compatible with getMetaInfo operation"); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetRemotePath.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetRemotePath.java new file mode 100644 index 0000000..9e18d4b --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetRemotePath.java @@ -0,0 +1,70 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetRemotePath extends Operation{ + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetSize.class); + private String rootPath; + + public GetRemotePath(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType,dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String path=null; + try { + path = tm.getRemotePath(bucket); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + logger.debug("\t path "+path+"\n\t rootPath: "+rootPath); + int rootLength=rootPath.length(); + if((path.length() >= rootLength)){ + path=path.substring(rootLength-1); + System.out.println("new relative path "+ path); + return path; + }else{ + throw new RuntimeException("expected rootPath or expected relative path are malformed: rootPath: "+rootPath+ " relativePath: "+path); + } + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + rootPath=file.getRootPath(); + logger.trace("rootArea is "+file.getRootPath()+ " absoluteremotepath is "+file.getAbsoluteRemotePath()); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + throw new RuntimeException("the 
getRemotePath method has an invalid id: "+ remotePath); + }else{ + return bucket=remotePath; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getRemotePath operation"); + } + + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetSize.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetSize.java new file mode 100644 index 0000000..a1d0e4d --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetSize.java @@ -0,0 +1,68 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Implements a getSize operation from the remote system: return the dimension of a file in the remote system + * @author Roberto Cirillo (ISTI - CNR) + * + */ + +public class GetSize extends Operation{ + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(GetSize.class); + + public GetSize(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + long dim=0; + try { + dim = tm.getSize(bucket); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return ""+dim; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation"); + } + + } + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetTTL.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetTTL.java new file mode 100644 index 0000000..a29213c --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetTTL.java @@ -0,0 +1,69 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.OutputStream; + +import 
org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implements a getTTL operation for a resource locked in the remote system: returns the remaining TTL + * @author Roberto Cirillo (ISTI - CNR) + */ + +public class GetTTL extends Operation { + + final Logger logger=LoggerFactory.getLogger(Download.class); + private String localPath; + private String remotePath; + private OutputStream os; + + public GetTTL(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + // TODO Auto-generated constructor stub + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + @Override + public String doIt(MyFile myFile) throws RemoteBackendException { + if (logger.isDebugEnabled()) { + logger.debug(" DOWNLOAD " + myFile.getRemotePath() + + " in bucket: " + bucket); + } + long currentTTL=-1; + TransportManager tm=null; + try { + //add a field for the file lock + TransportManagerFactory tmf=new TransportManagerFactory(server, user, password); + tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + currentTTL=tm.getTTL(bucket); + } catch (Exception e) { + if(tm!=null) + tm.close(); + throw new RemoteBackendException(" Error in getTTL operation ", e.getCause()); + } + return currentTTL+""; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, + boolean replaceOption) { + this.localPath=file.getLocalPath(); + this.remotePath=remotePath; + return getRemoteIdentifier(remotePath, rootArea); + + } + + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + +} + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUrl.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUrl.java new file mode 100644 index 0000000..3f67828 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUrl.java @@ -0,0 +1,76 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; + + + +public class GetUrl extends Operation{ + +// private OutputStream os; + TransportManager tm; + + public GetUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + 
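// resolves the file id via the TransportManager, encrypts it, and returns smp://<resolverHost>/<encrypted id> +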
@Override + public String initOperation(MyFile file, String remotePath, String author, + String[] server, String rootArea, boolean replaceOption) { + return getRemoteIdentifier(remotePath, rootArea); + } + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + + + @Override + public Object doIt(MyFile myFile) throws RemoteBackendException { + String resolverHost=myFile.getResolverHOst(); + String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR; + String urlParam=""; + try { + String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference()); + String phrase=myFile.getPassPhrase(); +// urlParam =new StringEncrypter("DES", phrase).encrypt(id); + urlParam = new Encrypter("DES", phrase).encrypt(id); +// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8"); + } catch (EncryptionException e) { + throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause()); + } + String url=urlBase+urlParam; + logger.info("URL generated: "+url); + if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){ + return url.toString()+Costants.VOLATILE_URL_IDENTIFICATOR; + } + return url; + } + + private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){ + String id=null; + if(tm ==null){ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference); + } + try { + id = tm.getId(bucket, forceCreation); + } catch (Exception e) { + tm.close(); + throw new RemoteBackendException(" Error in GetUrl operation. 
Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return id; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalItems.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalItems.java new file mode 100644 index 0000000..cee8c99 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalItems.java @@ -0,0 +1,72 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetUserTotalItems extends Operation { + + + final Logger logger=LoggerFactory.getLogger(GetUserTotalItems.class); + + public GetUserTotalItems(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String dim=null; + logger.info("check user total items for user: "+getOwner()+ " user is "+user); + try { + dim = tm.getUserTotalItems(getOwner()); + } catch (Exception e) { + logger.error(" Error in getUserTotalItems operation ", e); + tm.close(); + throw new RemoteBackendException(" Error in getUserTotalItems operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket+" for user: "+getOwner()); + } + return dim; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + setOwner(author); + if((remotePath != null) && (remotePath.length() > 0)){ +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + BucketCoding bc=new BucketCoding(); + buck=bc.bucketFileCoding(remotePath, rootArea); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + //remove directory bucket + DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author); + dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames); +// String bucketName=null; + } + return bucket=buck; + }else{ + logger.info("found empty remote path in input "); + return bucket; + } + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getUserTotalItems operation"); + } + + +} diff --git 
a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalVolume.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalVolume.java new file mode 100644 index 0000000..f9a2300 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/GetUserTotalVolume.java @@ -0,0 +1,71 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetUserTotalVolume extends Operation { + + final Logger logger=LoggerFactory.getLogger(GetUserTotalVolume.class); +// public String file_separator = ServiceEngine.FILE_SEPARATOR;//System.getProperty("file.separator"); + + public GetUserTotalVolume(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String dim=null; + logger.info("check user total volume for user: "+getOwner()+ " user is "+user); + try { + dim = tm.getUserTotalVolume(getOwner()); + } catch (Exception e) { + logger.error(" Error in getUserTotalVolume operation ", e); + tm.close(); + throw new RemoteBackendException(" Error in getUserTotalVolume operation ", e.getCause()); } + if (logger.isDebugEnabled()) { + logger.debug(" PATH " + bucket); + } + return dim; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + setOwner(author); + if(remotePath!= null && remotePath.length()>0){ +// String[] dirs= remotePath.split(file_separator); + if(logger.isDebugEnabled()) + logger.debug("remotePath: "+remotePath); + String buck=null; + BucketCoding bc=new BucketCoding(); + buck=bc.bucketFileCoding(remotePath, rootArea); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR); + //remove directory bucket + DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author); + dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames); + } + return bucket=buck; + }else{ + return bucket; + } + + } + + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + throw new IllegalArgumentException("Input/Output stream is not compatible with getUserTotalVolume operation"); + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Link.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Link.java new file mode 100644 index 0000000..b5dc26b --- 
/dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Link.java @@ -0,0 +1,95 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.net.UnknownHostException; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class Link extends Operation{ + + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Download.class); + final Logger logger=LoggerFactory.getLogger(Download.class); + private String sourcePath; + private String destinationPath; + private MyFile resource; + public Link(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + this.sourcePath=file.getLocalPath(); + this.destinationPath=remotePath; + sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea); + setResource(file); + return bucket=destinationPath; + + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String id=null; + try { + id=tm.link(this); + } catch (UnknownHostException e) { + tm.close(); + logger.error("Problem in link from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage()); + throw new RemoteBackendException(" Error in link operation ", e.getCause()); + } + return id; + } + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { +// For terrastore, the name of bucket is formed: path_____fileName_____author +// String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea); + this.sourcePath=resource.getLocalPath(); + this.destinationPath=resource.getRemotePath(); + setResource(resource); + sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea); + return bucket=destinationPath; + } + + public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException; + + public String getSourcePath() { + return sourcePath; + } + + public void setSourcePath(String sourcePath) { + this.sourcePath = sourcePath; + } + + public String getDestinationPath() { + return destinationPath; + } + + public void setDestinationPath(String destinationPath) { + this.destinationPath = destinationPath; + } + + public MyFile getResource() { + return resource; + } + + public 
void setResource(MyFile resource) { + this.resource = resource; + } + + +} + diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Lock.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Lock.java new file mode 100644 index 0000000..262784f --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Lock.java @@ -0,0 +1,124 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.OutputStream; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.operation.DownloadOperator; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implements a lock operation relative to a remote resource + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public abstract class Lock extends Operation { + + final Logger logger=LoggerFactory.getLogger(Download.class); + protected String localPath; + protected String remotePath; + protected OutputStream os; + protected MyFile resource; + protected Download download; + + public Lock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + // TODO Auto-generated constructor stub + super(server, user,pwd, bucket, monitor, isChunk, backendType, dbs); + } + + @Override + public String doIt(MyFile myFile) throws RemoteBackendException { + if (logger.isDebugEnabled()) { + logger.debug(" DOWNLOAD " + myFile.getRemotePath() + + " in bucket: " + getBucket()); + } + String unlockKey=null; + try { + //add a field for the file lock + Download download = new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames()); + unlockKey=get(download, myFile, true); + } catch (Exception e) { + TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword()); + TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference()); + tm.close(); + throw new RemoteBackendException(" Error in lock operation ", e.getCause()); + } + return unlockKey; + } + + @Override + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, + boolean replaceOption) { + String bucketName=null; + setResource(file); + // create the directory bucket + if((remotePath.length()<23) || (remotePath.contains(Costants.FILE_SEPARATOR))){ + this.localPath=file.getLocalPath(); + this.remotePath=remotePath; + bucketName = new BucketCoding().bucketFileCoding(remotePath, rootArea); + }else{ + bucketName=remotePath; + } + setBucket(bucketName); + return bucketName; + + } + + @Override + public String initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + + public abstract String execute(MongoIOManager 
mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String serverLocation) throws Exception; + + public String getLocalPath() { + return localPath; + } + + public void setLocalPath(String localPath) { + this.localPath = localPath; + } + + public String getRemotePath() { + return remotePath; + } + + public void setRemotePath(String remotePath) { + this.remotePath = remotePath; + } + + public OutputStream getOs() { + return os; + } + + public void setOs(OutputStream os) { + this.os = os; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + public Download getDownload() { + return download; + } + + public void setDownload(Download download) { + this.download = download; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Monitor.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Monitor.java new file mode 100644 index 0000000..a86b3d2 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Monitor.java @@ -0,0 +1,88 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +//import org.apache.log4j.Logger; +//import org.gcube.common.core.utils.logging.GCUBELog; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + + +import java.util.Vector; +/** + * A monitor class for the concurrent operations + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class Monitor { + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Monitor.class); + final Logger logger=LoggerFactory.getLogger(Monitor.class); + // request queue + private Vector<MyFile> requestQueue = new Vector<MyFile>(); + // fetch the first request in the queue + public synchronized MyFile getRequest(){ + if (logger.isDebugEnabled()) { + logger.debug("getRequest() - start"); + } + while (requestQueue.size() == 0){ + try { + wait(10000); + } + catch (InterruptedException e){ + logger.error("getRequest()", e); + } + } + MyFile myFile=requestQueue.remove(0); + notifyAll(); + if (logger.isDebugEnabled()) { + logger.debug("getRequest() - end"); + } + return myFile; + } + + public synchronized MyFile getRequest(ChunkProducer producer){ + if (logger.isDebugEnabled()) { + logger.debug("getRequest(ChunkProducer) - start"); + } + while (requestQueue.size() == 0){ + try { + wait(); + } + catch (InterruptedException e){ + logger.error("getRequest(ChunkProducer)", e); + } + } + MyFile myFile=requestQueue.remove(0); + notifyAll(); + if (logger.isDebugEnabled()) { + logger.debug("getRequest(ChunkProducer) - end"); + } + return myFile; + } + + // Enqueue a new request + public synchronized void putRequest(MyFile richiesta){ + if (logger.isDebugEnabled()) { + logger.debug("putRequest(MyFile) - start"); + logger.debug("request in queue, queue size: "+requestQueue.size()); + } + + while (requestQueue.size() > Costants.MAX_THREAD){ + try { + wait(); + } + catch (InterruptedException e){ + logger.error("putRequest(MyFile)", e); + } + } + requestQueue.addElement(richiesta); + notifyAll(); + if (logger.isDebugEnabled()) { + logger.debug("putRequest(MyFile) - end"); + } + } +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Move.java 
b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Move.java new file mode 100644 index 0000000..ddd95ec --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Move.java @@ -0,0 +1,96 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.OutputStream; +import java.net.UnknownHostException; + +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class Move extends Operation{ + + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Download.class); + final Logger logger=LoggerFactory.getLogger(Download.class); + protected String sourcePath; + protected String destinationPath; + protected MyFile resource; + public Move(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + this.sourcePath=file.getLocalPath(); + this.destinationPath=remotePath; + sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea); + setResource(file); + return bucket=destinationPath; + + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String id=null; + try { +// id=tm.move(myFile, sourcePath, destinationPath); + id=tm.move(this); + } catch (UnknownHostException e) { + tm.close(); + logger.error("Problem in move from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage()); + throw new RemoteBackendException(" Error in move operation ", e.getCause()); + } + return id; + } + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { + this.sourcePath=resource.getLocalPath(); + this.destinationPath=resource.getRemotePath(); + sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea); + + return bucket=destinationPath; + } + + public abstract String execute(MongoIOManager mongoPrimaryInstance, MemoryType memoryType, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException; + + public String getSourcePath() { + return sourcePath; + } + + public void setSourcePath(String sourcePath) { + this.sourcePath = sourcePath; + } + + public String getDestinationPath() { + return 
destinationPath; + } + + public void setDestinationPath(String destinationPath) { + this.destinationPath = destinationPath; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/MoveDir.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/MoveDir.java new file mode 100644 index 0000000..a75821f --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/MoveDir.java @@ -0,0 +1,95 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.net.UnknownHostException; +import java.util.List; + +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class MoveDir extends Operation{ + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Download.class); + final Logger logger=LoggerFactory.getLogger(Download.class); + private String sourcePath; + private String destinationPath; + private MyFile resource; +// private OutputStream os; + public MoveDir(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String initOperation(MyFile file, String remotePath, + String author, String[] server, String rootArea, boolean replaceOption) { + this.sourcePath=file.getLocalPath(); + this.destinationPath=remotePath; + sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea); + setResource(file); + return bucket=destinationPath; + + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + List<String> ids=null; + try { + ids=tm.moveDir(this); + } catch (UnknownHostException e) { + tm.close(); + logger.error("Problem in moveDir from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage()); + throw new RemoteBackendException(" Error in moveDir operation ", e.getCause()); + } + return ids.toString(); + } + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { + this.sourcePath=resource.getLocalPath(); + this.destinationPath=resource.getRemotePath(); + sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea); + setResource(resource); + return bucket=destinationPath; + } + + public abstract List<String> execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String 
sourcePath, String destinationPath, MemoryType memoryType) throws UnknownHostException; + + public String getSourcePath() { + return sourcePath; + } + + public void setSourcePath(String sourcePath) { + this.sourcePath = sourcePath; + } + + public String getDestinationPath() { + return destinationPath; + } + + public void setDestinationPath(String destinationPath) { + this.destinationPath = destinationPath; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Operation.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Operation.java new file mode 100644 index 0000000..0146e2d --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Operation.java @@ -0,0 +1,385 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +/** + * Defines the utility functions shared by the operation subclasses + * @author Roberto Cirillo (ISTI - CNR) + * + */ + +public abstract class Operation { + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(Operation.class); + + String[] server; + String user; + private String owner; + String password; + String bucket; + String[] dbNames; + private Monitor monitor; + private boolean isChunk; + String backendType; + + public Operation(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs){ + this.server=server; + this.user=user; + this.password=pwd; + this.bucket=bucket; + this.monitor=monitor; + this.isChunk=isChunk; + this.backendType=backendType; + this.dbNames=dbs; + } + + protected int numOfThread(int totChunks) { + if((totChunks> Costants.MIN_THREAD) &&(totChunks < Costants.MAX_THREAD)){ + int returnint = totChunks - 1; + return returnint; + }else if(totChunks > Costants.MAX_THREAD){ + return Costants.MAX_THREAD; + }else{ + return 1; + } + } + + + protected int getLengthCurrentChunk(long len, int i, int dimChunk) { + int lengthCurrentChunk=0; + if(((i+1)*dimChunk) <= len){ + lengthCurrentChunk=dimChunk; + }else{ + lengthCurrentChunk=(int) (len - (i*dimChunk)); + } + return lengthCurrentChunk; + } + + + protected int getNumberOfChunks(long len, long dimChunk) { + if(len< dimChunk) + return 1; + else if((len%dimChunk)>0){ + long returnint = (len / dimChunk) + 1; + return (int)returnint; + }else{ + long returnint = (len / dimChunk); + return (int)returnint; + } + } + + /** + * Upload operation + * @param 
resource object that contains the resource coordinates + * @param isChunk if the file is uploaded in chunks + * @param isBase64 if it is base64 encoded + * @param replaceOption if the file will be replaced + * @param isLock if the file is locked + * @return a String that identifies a file + * @throws Exception + */ + public String put(Upload upload, MyFile resource, boolean isChunk, boolean isBase64, boolean replaceOption, boolean isLock) throws Exception{ + if (logger.isDebugEnabled()) { + logger.debug("put(MyFile, boolean, boolean) - start"); + } + long len=1; + if(resource.getLocalPath()!=null) + len=new File(resource.getLocalPath()).length(); + if(logger.isDebugEnabled()){ + logger.debug("file size: "+len); + } + long dimensionChunk=0; + if(logger.isDebugEnabled()) + logger.debug("PUT is chunk? "+isChunk); + if(isChunk){ + ChunkOptimization chunkOptimization=new ChunkOptimization(len); + dimensionChunk=chunkOptimization.chunkCalculation(); + }else{ + if(len==0){ + dimensionChunk=1; + len=1; + }else{ + dimensionChunk=len; + } + + } + if (logger.isDebugEnabled()) { + logger.debug("put(MyFile, boolean, boolean) - encode length: " + + len); + } +// number of chunks calculation + int totChunks=1; + if(logger.isDebugEnabled()) + logger.debug("len File: "+len+" len chunk: "+dimensionChunk); + totChunks=getNumberOfChunks(len, dimensionChunk); + if (logger.isDebugEnabled()) { + logger.debug("put(MyFile, boolean, boolean) - number of chunks: " + + totChunks); + } + int nThreads=1; + if(totChunks>1){ + nThreads=numOfThread(totChunks); + } + if (logger.isDebugEnabled()) { + logger.debug("put(MyFile, boolean, boolean) - number of thread: " + + nThreads); + } + if(logger.isDebugEnabled()) + logger.debug("consumer has a bucket name: "+bucket); + if(totChunks>1){ + if(logger.isDebugEnabled()) + logger.debug("THREAD POOL USED"); + ChunkConsumer consumer= new ChunkConsumer(monitor, 1, server, user, password, dbNames, isChunk, bucket, replaceOption); + Thread producer=new Thread(new ChunkProducer(monitor, resource, dimensionChunk, totChunks, nThreads, bucket, consumer)); + producer.start(); + if (logger.isDebugEnabled()) { + logger.debug("put(MyFile, boolean, boolean) - end"); + } + producer.join(); + return null; + }else{ + if(logger.isDebugEnabled()) + logger.debug("NO THREAD POOL USED"); + TransportManagerFactory tmf=new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference()); + String objectId=tm.uploadManager(upload, resource, bucket, bucket+"_1", replaceOption); + return objectId; + } + } + + /** + * Download operation + * @param myFile object that contains the resource coordinates + * @throws IOException + * @throws InterruptedException + */ + public String get(Download download, MyFile myFile, boolean isLock) throws IOException, InterruptedException, Exception { + if (logger.isDebugEnabled()) { + logger.debug("get(String) - start"); + } + String unlockKey=null; + TransportManagerFactory tmf=null; +// if(server.length >1) + tmf=new TransportManagerFactory(server, user, password); +// else +// tmf=new TransportManagerFactory(server, null, null); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + long start=System.currentTimeMillis(); + String path=myFile.getLocalPath(); + if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){ + startPThreadChunk(download, myFile, tm, path); + + 
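// chunked path (non-mongo backends): each retrieved chunk is queued on the Monitor and drained to the local file by FileWriter tasks +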
}else{ + unlockKey=tm.downloadManager(download, myFile, bucket, MyFile.class); + } + + if((path!=null) && (new File(path).length()>0)){ + if (logger.isDebugEnabled()) { + logger.debug("*** Time for downloading: " + + (System.currentTimeMillis() - start) + " ms "+"\n\n"); + } + } + return unlockKey; + } + + /** + * @param myFile + * @param tm + * @param path + * @throws FileNotFoundException + * @throws InterruptedException + * @throws IOException + */ + protected void startPThreadChunk(Download download,MyFile myFile, TransportManager tm, + String path) throws FileNotFoundException, InterruptedException, + IOException { + ExecutorService executor = Executors.newFixedThreadPool (2); + int j=0; + MyFile value=null; + + if(logger.isInfoEnabled()) + logger.info("localPath: "+path+" bucket: "+bucket); + OutputStream out =null; + if((path !=null) && (!path.isEmpty())) + out = new FileOutputStream(new File(path)); + do{ + value=null; +// String currentKey=bucket+j; + if (logger.isDebugEnabled()) { + logger.debug("get(String) -"); + } + try{ + value=(MyFile) tm.get(download); + }catch(Exception e){ + if (logger.isDebugEnabled()) { + logger.debug("get(String) - \n Found " + j + " keys"); + } + value=null; + } + if(value!=null){ + if (logger.isDebugEnabled()) { + logger.debug("get(String) - write chunk , author: " + + value.getOwner()); + } + monitor.putRequest(value); + System.gc(); + executor.submit (new FileWriter(monitor, out)); + } + j++; + }while(value!=null); + executor.shutdown (); + executor.awaitTermination (Long.MAX_VALUE, TimeUnit.SECONDS); + if(out!=null){ + out.flush(); + out.close(); + } + } + + protected String getRemoteIdentifier(String remotePath, String rootArea) { + String buck=null; + boolean isId=ObjectId.isValid(remotePath); + if(!isId){ + buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=buck; + }else{ + return bucket=remotePath; + } + } + + protected String appendFileSeparator(String source) { + if(source.lastIndexOf(Costants.FILE_SEPARATOR) != (source.length()-1)) + source=source+Costants.FILE_SEPARATOR; + return source; + } + + protected String extractParent(String source) { + source=source.substring(0, source.length()-1); + String parent=source.substring(source.lastIndexOf(Costants.FILE_SEPARATOR)+1); + logger.debug("parent folder extracted: "+parent); + return parent; + } + + /** + * Do an operation + * @param myFile object that contains the resource coordinates + * @return a generic object that contains operation results + * @throws RemoteBackendException + */ + public abstract Object doIt(MyFile myFile) throws RemoteBackendException; + + /** + * Init an operation + * @param file object that contains the resource coordinates + * @param remoteIdentifier remote path of the resource + * @param author file owner + * @param server server list + * @param rootArea remote root path + * @param replaceOption if true the file will be replaced + * @return a string that identifies the operation + */ + public abstract String initOperation(MyFile file, String remoteIdentifier, String author, String[] server, String rootArea, boolean replaceOption); + + + /** + * Init an operation + * @param resource object that contains the resource coordinates + * @param remoteIdentifier remote path of the resource + * @param author file owner + * @param server server list + * @param rootArea remote root path + * @return a string that identifies the operation + */ + public abstract String initOperation(MyFile resource, String remoteIdentifier, String author, String[] server, String 
rootArea); + + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + this.owner = owner; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getBucket() { + return bucket; + } + + public void setBucket(String bucket) { + this.bucket = bucket; + } + + public String[] getDbNames() { + return dbNames; + } + + public void setDbNames(String[] dbNames) { + this.dbNames = dbNames; + } + + public Monitor getMonitor() { + return monitor; + } + + public void setMonitor(Monitor monitor) { + this.monitor = monitor; + } + + public boolean isChunk() { + return isChunk; + } + + public void setChunk(boolean isChunk) { + this.isChunk = isChunk; + } + + public String getBackendType() { + return backendType; + } + + public void setBackendType(String backendType) { + this.backendType = backendType; + } + + public String[] getServer() { + return server; + } + + public void setServer(String[] server) { + this.server = server; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + + +} \ No newline at end of file diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationFactory.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationFactory.java new file mode 100644 index 0000000..7b98eda --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationFactory.java @@ -0,0 +1,114 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.transport.backend.operation.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +//import terrastore.client.TerrastoreClient; + +/** + * + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class OperationFactory { + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(OperationFactory.class); + final Logger logger=LoggerFactory.getLogger(OperationFactory.class); +// TerrastoreClient client; + String[] server; + String bucket; + String user; + String password; + String[] dbNames; + Monitor monitor; + boolean isChunk; + private String backendType; + + public OperationFactory(String server[], String user, String pwd, String bucket, Monitor monitor2, boolean isChunk, String backendType, String[] dbs){ + this.server=server; + this.user=user; + this.password=pwd; + this.bucket=bucket; + this.monitor=monitor2; + this.isChunk=isChunk; + this.backendType=backendType; + this.dbNames=dbs; + } + + public Operation getOperation(String operation){ + if (logger.isInfoEnabled()) { + logger.info("getOperation(String) - start "+operation); + } + Operation op=null; + if(operation.equalsIgnoreCase("upload")){ + op=new UploadOperator(server, user, password, bucket , monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("download")){ + op= new DownloadOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("remove")){ + op=new Remove(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getSize")){ + op=new GetSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("duplicate")){ + op=new DuplicateOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); 
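+// (note) dispatch is by case-insensitive operation name; every branch wires the same
+// constructor arguments, and unknown names fall through to the error branch at the end.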
+ }else if(operation.equalsIgnoreCase("softcopy")){ + op=new SoftCopyOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getFolderSize")){ + op=new GetFolderSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getFolderCount")){ + op=new GetFolderCount(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getFolderLastUpdate")){ + op=new GetFolderLastUpdate(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getTotalUserItems")){ + op=new GetUserTotalItems(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getTotalUserVolume")){ + op=new GetUserTotalVolume(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("download+lock")){ + op=new DownloadAndLock(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("upload+unlock")){ + op=new UploadAndUnlock(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("lock")){ + op=new LockOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("unlock")){ + op=new UnlockOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getTTL")){ + op=new GetTTL(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("renewTTL")){ + op=new RenewTTL(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getUrl")){ + op=new GetUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getHttpUrl")){ + op=new GetHttpUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getHttpsUrl")){ + op=new GetHttpsUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("link")){ + op=new LinkOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("copy")){ + op=new SoftCopyOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("move")){ + op=new MoveOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("copy_dir")){ + op=new CopyDirOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("move_dir")){ + op=new MoveDirOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getMetaFile")){ + op=new GetMetaFile(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("getMetaInfo")){ + op=new GetMetaInfo(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("setMetaInfo")){ + op=new SetMetaInfo(server, user, password, bucket, monitor, isChunk, backendType, dbNames); + }else if(operation.equalsIgnoreCase("exist")){ + op=new Exist(server, user, password, bucket, monitor, 
isChunk, backendType, dbNames);
+ }else if(operation.equalsIgnoreCase("getRemotePath")){
+ op=new GetRemotePath(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
+ }else{
+ logger.error("getOperation(String) - Invalid Operation");
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("getOperation(String) - end");
+ }
+ return op;
+ }
+
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationManager.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationManager.java new file mode 100644 index 0000000..ebd9cf2 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/OperationManager.java @@ -0,0 +1,148 @@
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+//import terrastore.client.TerrastoreClient;
+
+/**
+ * This is the manager of the operations on file objects.
+ * The number of upload threads and the chunk threshold are determined in this class.
+ * (TODO) build and send accounting report
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public class OperationManager {
+ /**
+ * Logger for this class
+ */
+ final Logger logger=LoggerFactory.getLogger(OperationManager.class);
+ private String[] server;
+// private int dimension;
+ private String operation;
+ private MyFile resource;
+ private boolean isChunk;
+ private String bucketName;
+ private String fileDest;
+ private String backendType;
+ private boolean isBase64;
+ private String user;
+ private String password;
+ private String[] dbNames;
+
+
+ public OperationManager(String[] server, String user, String password, String operation, MyFile myFile, String backendType, String[] dbs){
+ this.setServer(server);
+ this.setUser(user);
+ this.setPassword(password);
+ this.setTypeOperation(operation);
+ this.setResource(myFile);
+ this.setDbNames(dbs);
+ this.backendType=backendType;
+ }
+
+ public Object startOperation(MyFile file, String remotePath, String author, String[] server, boolean chunkOpt, String rootArea, boolean replaceOption) throws RemoteBackendException{
+// setUser(author);
+ if (logger.isDebugEnabled()) {
+ logger.debug("connection(boolean) - start");
+ }
+ logger.info("startOperation getResource().getGcubeAccessType()= "+getResource().getGcubeAccessType()+" file.getGcubeAccessType() "+file.getGcubeAccessType());
+ // create the monitor
+ Monitor monitor = new Monitor();
+ OperationFactory of=new OperationFactory(server, getUser(), getPassword(), getBucketName(), monitor, chunkOpt, getBackendType(), getDbNames());
+ Operation op=of.getOperation(getTypeOperation());
+ //start specific operation
+ setBucketName(op.initOperation(file, remotePath, author, server, rootArea, replaceOption));
+ Object object=op.doIt(getResource());
+ return object;
+ }
+
+
+ private String getBackendType() {
+ return backendType;
+ }
+
+ public String getBucketName() {
+ return bucketName;
+ }
+
+ public void setBucketName(String bucketName) {
+ this.bucketName = bucketName;
+ }
+
+ public String getFileDest() {
+ return fileDest;
+ }
+
+ public void setFileDest(String fileDest) {
+ this.fileDest = fileDest;
+ }
+
+ public boolean isChunk() {
+ return isChunk;
+ }
+
+ public void setChunk(boolean isChunk) {
+ this.isChunk = isChunk;
+ }
+
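+// A rough usage sketch (illustrative only: the operation name, the paths and the
+// "MongoDB" backend type are hypothetical values; the signatures are the ones
+// defined in this class and in OperationFactory above):
+// MyFile file = ...; // resource coordinates
+// OperationManager om = new OperationManager(server, user, password, "upload", file, "MongoDB", dbNames);
+// Object id = om.startOperation(file, "/remote/path/img.png", "author", server, false, rootArea, true);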
+ public String[] getServer() {
+ return server;
+ }
+
+ public void setServer(String[] server) {
+ this.server = server;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public void setUser(String user) {
+ this.user = user;
+ }
+
+ public void setPassword(String pwd) {
+ this.password = pwd;
+ }
+
+ public String getTypeOperation() {
+ return operation;
+ }
+
+ public void setTypeOperation(String operation) {
+ this.operation = operation;
+ }
+
+ public MyFile getResource() {
+ return resource;
+ }
+
+ public void setResource(MyFile resource) {
+ this.resource = resource;
+ }
+
+ public boolean isBase64() {
+ return isBase64;
+ }
+
+ public void setBase64(boolean isBase64) {
+ this.isBase64 = isBase64;
+ }
+
+ public String[] getDbNames() {
+ return dbNames;
+ }
+
+ public void setDbNames(String[] dbNames) {
+ this.dbNames = dbNames;
+ }
+
+
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Remove.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Remove.java new file mode 100644 index 0000000..4d5f7fe --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Remove.java @@ -0,0 +1,88 @@
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Implements a remove operation on the cluster: removes a file object
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public class Remove extends Operation{
+ /**
+ * Logger for this class
+ */
+ final Logger logger=LoggerFactory.getLogger(Remove.class);
+
+ public Remove(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
+ super(server,user,pwd, bucket, monitor, isChunk, backendType, dbs);
+ }
+
+ public String doIt(MyFile myFile) throws RemoteBackendException{
+ TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
+ TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
+ removeBucket(tm, bucket, myFile);
+ if (logger.isDebugEnabled()) {
+ logger.debug(" REMOVE " + bucket);
+ }
+ return "removed";
+ }
+
+ @Override
+ public String initOperation(MyFile file, String remotePath,
+ String author, String[] server, String rootArea, boolean replaceOption) {
+ String[] dirs= remotePath.split(Costants.FILE_SEPARATOR);
+ if(logger.isDebugEnabled())
+ logger.debug("remotePath: "+remotePath);
+ String buck=null;
+// in this case the remote path is really a remote path and not an objectId
+ if((dirs != null) && ((dirs.length >1) || ((dirs.length==1) && (dirs[0].length()<23)))){
+ BucketCoding bc=new BucketCoding();
+ buck=bc.bucketFileCoding(remotePath, rootArea);
+ if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
+ buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
+ //remove directory bucket
+ }
+ }else{
+ // is an object id
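+// (note) a MongoDB ObjectId is a 24-character hexadecimal string, which is why a
+// single path segment shorter than 23 characters is treated as a real remote path
+// above; anything else is assumed to be an ObjectId here.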
+ buck=remotePath;
+ }
+
+// bucketName=new BucketCoding().bucketFileCoding(remotePath, author, rootArea);
+ return bucket=buck;
+ }
+
+ /**
+ * Remove a remote directory identified by bucketName
+ * @param bucketName indicates the remote directory to remove
+ * @throws RemoteBackendException
+ */
+ public void removeBucket(TransportManager tm, String bucketName, MyFile resource) throws RemoteBackendException {
+ if(logger.isDebugEnabled())
+ logger.debug("removing file bucket: "+bucketName);
+ try {
+ tm.removeRemoteFile(bucketName, resource);
+ } catch (Exception e) {
+ tm.close();
+ logger.error("Problem in remove: "+bucketName+": "+e.getMessage());
+ throw new RemoteBackendException(" Error in remove operation ", e.getCause());
+ }
+ }
+
+ @Override
+ public String initOperation(MyFile resource, String RemotePath,
+ String author, String[] server, String rootArea) {
+ throw new IllegalArgumentException("Input/Output stream is not compatible with remove operation");
+ }
+
+}
+
+
+
+ diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/RenewTTL.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/RenewTTL.java new file mode 100644 index 0000000..ea459c9 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/RenewTTL.java @@ -0,0 +1,64 @@
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import java.io.OutputStream;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Implements a Renew TTL operation for a locked remote resource
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public class RenewTTL extends Operation {
+
+ final Logger logger=LoggerFactory.getLogger(RenewTTL.class);
+ private String localPath;
+ private String remotePath;
+ private OutputStream os;
+
+ public RenewTTL (String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
+ // TODO Auto-generated constructor stub
+ super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
+ }
+
+ @Override
+ public String doIt(MyFile myFile) throws RemoteBackendException {
+ TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
+ TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
+ long ttl=-1;
+ try {
+ myFile.setRemotePath(bucket);
+ ttl = tm.renewTTL(myFile);
+ } catch (Throwable e) {
+ tm.close();
+ throw new RemoteBackendException(" Error in renew TTL operation ", e.getCause());
+ }
+ return ttl+"";
+ }
+
+ @Override
+ public String initOperation(MyFile file, String remotePath,
+ String author, String[] server, String rootArea,
+ boolean replaceOption) {
+ this.localPath=file.getLocalPath();
+ this.remotePath=remotePath;
+ String bucketName = new BucketCoding().bucketFileCoding(remotePath, rootArea);
+ return bucket=bucketName;
+
+ }
+
+ @Override
+ public String initOperation(MyFile resource, String RemotePath,
+ String author, String[] server, String rootArea) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+}
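+// A rough usage sketch (illustrative only; assumes the "renewTTL" name is dispatched
+// through OperationFactory/OperationManager as shown earlier, with hypothetical values):
+// MyFile file = ...; // carries the coordinates of the locked resource
+// OperationManager om = new OperationManager(server, user, password, "renewTTL", file, "MongoDB", dbNames);
+// String ttlLeft = (String) om.startOperation(file, remotePath, author, server, false, rootArea, false);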
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SetMetaInfo.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SetMetaInfo.java new file mode 100644 index 0000000..f6aad4e --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SetMetaInfo.java @@ -0,0 +1,61 @@
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import org.bson.types.ObjectId;
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SetMetaInfo extends Operation {
+
+ /**
+ * Logger for this class
+ */
+ final Logger logger=LoggerFactory.getLogger(SetMetaInfo.class);
+
+ public SetMetaInfo(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
+ super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
+ }
+
+ public String doIt(MyFile myFile) throws RemoteBackendException{
+ TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
+ TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
+ try {
+ tm.setFileProperty(bucket, myFile.getGenericPropertyField(), myFile.getGenericPropertyValue());
+ } catch (Exception e) {
+ tm.close();
+ e.printStackTrace();
+ throw new RemoteBackendException(" Error in SetMetaInfo operation ", e.getCause());
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug(" PATH " + bucket);
+ }
+ return "1";
+ }
+
+ @Override
+ public String initOperation(MyFile file, String remotePath,
+ String author, String[] server, String rootArea, boolean replaceOption) {
+ if(logger.isDebugEnabled())
+ logger.debug("remotePath: "+remotePath);
+ String buck=null;
+ boolean isId=ObjectId.isValid(remotePath);
+ if(!isId){
+ buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
+ return bucket=buck;
+ }else{
+ return bucket=remotePath;
+ }
+ }
+
+
+ @Override
+ public String initOperation(MyFile resource, String RemotePath,
+ String author, String[] server, String rootArea) {
+ throw new IllegalArgumentException("Input/Output stream is not compatible with setMetaInfo operation");
+ }
+
+
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SoftCopy.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SoftCopy.java new file mode 100644 index 0000000..15834e4 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/SoftCopy.java @@ -0,0 +1,130 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import java.net.UnknownHostException;
+
+import org.bson.types.ObjectId;
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
+import 
org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public abstract class SoftCopy extends Operation { + + /** + * Logger for this class + */ + final Logger logger=LoggerFactory.getLogger(SoftCopy.class); + private String sourcePath; + private String destinationPath; + private MyFile resource; + + + public SoftCopy(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + } + + public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) { +// if(remotePath != null){ +// boolean isId=ObjectId.isValid(remotePath); +// setResource(file); +// if(!isId){ +//// String[] dirs= remotePath.split(file_separator); +// if(logger.isDebugEnabled()) +// logger.debug("remotePath: "+remotePath); +// String buck=null; +// buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); +// return bucket=buck; +// }else{ +// return bucket=remotePath; +// } +// }return bucket=null;//else throw new RemoteBackendException("argument cannot be null"); + + this.sourcePath=file.getLocalPath(); + this.destinationPath=remotePath; + sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea); + setResource(file); + return bucket=destinationPath; + + } + + public String doIt(MyFile myFile) throws RemoteBackendException{ + TransportManagerFactory tmf= new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference()); + String id=null; + try { + id=tm.softCopy(this); + } catch (UnknownHostException e) { + tm.close(); + logger.error("Problem in copy from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage()); + throw new RemoteBackendException(" Error in copy operation ", e.getCause()); + } + return id; + } + + + @Override + public String initOperation(MyFile resource, String remotePath, String author, String[] server, String rootArea) { +// For terrastore, the name of bucket is formed: path_____fileName_____author + this.sourcePath=resource.getLocalPath(); + this.destinationPath=resource.getRemotePath(); + sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea); + destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea); + setResource(resource); + return bucket=destinationPath; +// if(remotePath != null){ +// boolean isId=ObjectId.isValid(remotePath); +// setResource(resource); +// if(!isId){ +//// String[] dirs= remotePath.split(file_separator); +// if(logger.isDebugEnabled()) +// logger.debug("remotePath: "+remotePath); +// String buck=null; +// buck = new BucketCoding().bucketFileCoding(remotePath, rootArea); +// return bucket=buck; +// }else{ +// return bucket=remotePath; +// } +// }return bucket=null;//else throw new RemoteBackendException("argument cannot be null"); + } + + public abstract String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException; + + public String getSourcePath() { + return sourcePath; + 
}
+
+ public void setSourcePath(String sourcePath) {
+ this.sourcePath = sourcePath;
+ }
+
+ public String getDestinationPath() {
+ return destinationPath;
+ }
+
+ public void setDestinationPath(String destinationPath) {
+ this.destinationPath = destinationPath;
+ }
+
+ public MyFile getResource() {
+ return resource;
+ }
+
+ public void setResource(MyFile resource) {
+ this.resource = resource;
+ }
+
+
+
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Unlock.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Unlock.java new file mode 100644 index 0000000..02777d7 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Unlock.java @@ -0,0 +1,129 @@
+package org.gcube.contentmanagement.blobstorage.service.operation;
+
+import java.io.OutputStream;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
+import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
+import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
+
+
+/**
+ * Implements the unlock operation for a locked remote resource
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public abstract class Unlock extends Operation {
+
+ private String keyUnlock;
+ protected String localPath;
+ protected String remotePath;
+ protected OutputStream os;
+ protected MyFile resource;
+ protected Upload upload;
+
+ public Unlock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
+ // TODO Auto-generated constructor stub
+ super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
+ }
+
+ @Override
+ public String doIt(MyFile myFile) throws RemoteBackendException {
+ if (logger.isDebugEnabled()) {
+ logger.debug(" UPLOAD " + myFile.getLocalPath() + + " author: " + myFile.getOwner());
+ }
+ String objectId=null;
+ try {
+ Upload upload= new UploadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
+ // a parameter for the lock should be passed here
+ objectId=put(upload, myFile, isChunk(), false, false, true);
+ } catch (Exception e) {
+ TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
+ TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
+ tm.close();
+ throw new RemoteBackendException(" Error in unlock operation ", e.getCause());
+ }
+ return objectId;
+
+ }
+
+ @Override
+ public String initOperation(MyFile file, String remotePath,
+ String author, String[] server, String rootArea,
+ boolean replaceOption) {
+ String bucketName=null;
+ // create the directory bucket
+ if((remotePath.length()<23) || (remotePath.contains(Costants.FILE_SEPARATOR))){
+ // the name of bucket is formed: path_____fileName_____author
+ bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
+ }else{
+ //is an ObjectId
+ bucketName=remotePath;
+ }
+ return bucket=bucketName;
+
+ }
+
+ @Override
+ public String 
initOperation(MyFile resource, String RemotePath, + String author, String[] server, String rootArea) { + // TODO Auto-generated method stub + return null; + } + + public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, String key4unlock) throws Exception; + + public String getLocalPath() { + return localPath; + } + + public void setLocalPath(String localPath) { + this.localPath = localPath; + } + + public String getRemotePath() { + return remotePath; + } + + public void setRemotePath(String remotePath) { + this.remotePath = remotePath; + } + + public OutputStream getOs() { + return os; + } + + public void setOs(OutputStream os) { + this.os = os; + } + + public MyFile getResource() { + return resource; + } + + public void setResource(MyFile resource) { + this.resource = resource; + } + + public Upload getUpload() { + return upload; + } + + public void setUpload(Upload upload) { + this.upload = upload; + } + + public String getKeyUnlock() { + return keyUnlock; + } + + public void setKeyUnlock(String keyUnlock) { + this.keyUnlock = keyUnlock; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Upload.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Upload.java new file mode 100644 index 0000000..4328e1d --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/Upload.java @@ -0,0 +1,165 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Implements a upload operation from the cluster: upload a file object + * @author Roberto Cirillo (ISTI - CNR) + * + */ + +public abstract class Upload extends Operation { + /** + * Logger for this class + */ +// private static final GCUBELog logger = new GCUBELog(Upload.class); + final Logger logger=LoggerFactory.getLogger(Upload.class); + protected InputStream is; + private boolean replaceOption; + protected String localPath; + protected String remotePath; + protected OutputStream os; + protected MyFile resource; + + public Upload(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String bck, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, bck, dbs); + } + + + + public String doIt(MyFile myFile) throws RemoteBackendException{ + if (logger.isDebugEnabled()) { + logger.debug(" UPLOAD " + myFile.getLocalPath() + + " author: " + myFile.getOwner()); + } + String objectId=null; + try { + objectId=put(this, myFile, isChunk(), false, replaceOption, false); + } catch (Throwable e) { + TransportManagerFactory tmf=new TransportManagerFactory(server, user, password); + TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), 
myFile.getReadPreference()); + tm.close(); + logger.error("Problem in upload from: "+myFile.getLocalPath()+": "+e.getMessage()); + throw new RemoteBackendException(" Error in upload operation ", e.getCause()); + } + return objectId; + } + + + + + @Override + public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) { + // set replace option + this.replaceOption=replaceOption; + setResource(file); +//patch id: check if remotePath is not an id + if(remotePath.contains(Costants.FILE_SEPARATOR)){ + // the name of bucket is formed: path_____fileName_____author + String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea); + return bucket=bucketName; + }else{ + return bucket=remotePath; + } + } + + + + @Override + public String initOperation(MyFile resource, String remotePath, + String author, String[] server, String rootArea) { + // the name of bucket is formed: path_____fileName_____author + String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea); + setResource(resource); + this.is=resource.getInputStream(); + return bucket=bucketName; + } + + + public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, boolean replace) throws IOException; + + public InputStream getIs() { + return is; + } + + + + public void setIs(InputStream is) { + this.is = is; + } + + + + public boolean isReplaceOption() { + return replaceOption; + } + + + + public void setReplaceOption(boolean replaceOption) { + this.replaceOption = replaceOption; + } + + + + public String getLocalPath() { + return localPath; + } + + + + public void setLocalPath(String localPath) { + this.localPath = localPath; + } + + + + public String getRemotePath() { + return remotePath; + } + + + + public void setRemotePath(String remotePath) { + this.remotePath = remotePath; + } + + + + public OutputStream getOs() { + return os; + } + + + + public void setOs(OutputStream os) { + this.os = os; + } + + + + public MyFile getResource() { + return resource; + } + + + + public void setResource(MyFile resource) { + this.resource = resource; + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/UploadAndUnlock.java b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/UploadAndUnlock.java new file mode 100644 index 0000000..73feb72 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/service/operation/UploadAndUnlock.java @@ -0,0 +1,62 @@ +package org.gcube.contentmanagement.blobstorage.service.operation; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator; +/** + * @deprecated + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class UploadAndUnlock extends Operation { + +// private String keyUnlock; + + public UploadAndUnlock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) { + // TODO Auto-generated constructor stub + super(server,user, pwd, bucket, monitor, isChunk, 
backendType, dbs);
+ }
+
+ @Override
+ public String doIt(MyFile myFile) throws RemoteBackendException {
+ if (logger.isDebugEnabled()) {
+ logger.debug(" UPLOAD " + myFile.getLocalPath() + + " author: " + myFile.getOwner());
+ }
+ Upload upload= new UploadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
+ String objectId=null;
+ try {
+ // a parameter for the lock should be passed here
+ objectId=put(upload, myFile, isChunk(), false, false, true);
+ } catch (Exception e) {
+ TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
+ TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
+ tm.close();
+ throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e.getCause());
+ }
+ return objectId;
+
+ }
+
+ @Override
+ public String initOperation(MyFile file, String remotePath,
+ String author, String[] server, String rootArea,
+ boolean replaceOption) {
+ // set replace option
+// this.replaceOption=replaceOption;
+ // the name of bucket is formed: path_____fileName_____author
+ String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
+ return bucket=bucketName;
+
+ }
+
+ @Override
+ public String initOperation(MyFile resource, String RemotePath,
+ String author, String[] server, String rootArea) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/test/SimpleTest2.java b/src/main/java/org/gcube/contentmanagement/blobstorage/test/SimpleTest2.java new file mode 100644 index 0000000..f085464 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/test/SimpleTest2.java @@ -0,0 +1,28 @@
+package org.gcube.contentmanagement.blobstorage.test;
+
+import java.util.List;
+import org.gcube.contentmanagement.blobstorage.service.IClient;
+import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
+import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
+import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
+
+
+
+public class SimpleTest2 {
+
+ public static void main(String[] args) throws RemoteBackendException{
+ String[] server=new String[]{"146.48.123.73","146.48.123.74" };
+
+ IClient client=new ServiceEngine(server, "rcirillo", "cnr", "private", "rcirillo");
+// String localFile="/home/rcirillo/FilePerTest/CostaRica.jpg";
+ String remoteFile="/img/shared9.jpg";
+ String newFile="/home/rcirillo/FilePerTest/repl4.jpg";
+ client.get().LFile(newFile).RFile(remoteFile);
+ List<StorageObject> list=client.showDir().RDir("/img/");
+ for(StorageObject obj : list){
+ System.out.println("obj found: "+obj.getName());
+ }
+ String uri=client.getUrl().RFile(remoteFile);
+ System.out.println(" uri file: "+uri);
+ }
+} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManager.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManager.java new file mode 100644 index 0000000..e754342 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManager.java @@ -0,0 +1,357 @@
+package org.gcube.contentmanagement.blobstorage.transport;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.List;
+import java.util.Map;
+
+import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
+import 
org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
+import org.gcube.contentmanagement.blobstorage.service.operation.*;
+import org.gcube.contentmanagement.blobstorage.transport.backend.operation.LockOperator;
+import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UnlockOperator;
+
+import com.mongodb.MongoException;
+
+/**
+ * The TransportManager exposes the methods used to connect to the remote system. This class should be instantiated to open connections to the remote backend
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public abstract class TransportManager {
+
+
+
+ /**
+ * This method specifies the type of the backend for dynamic loading.
+ * For MongoDB, the default backend, the name is MongoDB
+ * @return the backend name
+ */
+ public abstract String getName();
+
+ /**
+ * This method initializes and configures the backend servers
+ * @param server array that contains the IPs of the backend servers
+ * @param user
+ * @param pass
+ */
+ public abstract void initBackend(String[] server, String user, String pass, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern);
+
+
+ /**
+ * Start the download operation. It contains logic to determine the correct operation based on the input parameters
+ * @param download the download operation to execute
+ * @param myFile object that contains the resource coordinates
+ * @param key remote path or objectId
+ * @param type class type of myFile object
+ * @return the key of the remote resource
+ * @throws IOException if there are IO problems
+ */
+ public String downloadManager(Download download, MyFile myFile, String key, Class type) throws Exception{
+ String key4lock=null;
+ if(myFile.isLock()){
+ download.setResource(myFile);
+ get(download);
+ Lock lock= new LockOperator(download.getServer(), download.getUser(), download.getPassword(), download.getBucket(), download.getMonitor(), download.isChunk(), download.getBackendType(), download.getDbNames());
+ lock.setResource(myFile);
+ key4lock=lock(lock);
+ return key4lock;
+ }else{
+// return get(myFile, key, type).toString();
+ return get(download).toString();
+ }
+ }
+
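+// note: when the resource is flagged as locked, downloadManager first fetches the
+// object, then acquires the lock through a LockOperator and returns the unlock key;
+// otherwise it simply returns the result of get(download) as a string.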
+ /**
+ * Start the upload operation. It contains logic to determine the correct operation based on the input parameters
+ * @param resource object that contains the resource coordinates
+ * @param bucket remote path or objectId
+ * @param key used only for chunk index operation
+ * @param replace if true, the file will be replaced
+ * @return the id of the remote resource
+ * @throws FileNotFoundException
+ * @throws UnknownHostException
+ */
+ public String uploadManager(Upload upload, Object resource, String bucket, String key, boolean replace) throws Exception{
+ String id=null;
+ MyFile file=(MyFile)resource;
+ if((file.getLockedKey()!=null) && (!file.getLockedKey().isEmpty())){
+ Unlock unlock= new UnlockOperator(upload.getServer(), upload.getUser(), upload.getPassword(), upload.getBucket(), upload.getMonitor(), upload.isChunk(), upload.getBackendType(), upload.getDbNames());
+ unlock.setResource(file);
+ unlock.setKeyUnlock(file.getLockedKey());
+ id=unlock(unlock);
+ upload.setResource(file);
+ id=put(upload);
+ }else{
+// id=put(resource, bucket, key, replace);
+ id=put(upload);
+ }
+ return id;
+ }
+ /**
+ * get an object from the cluster
+ * @param myFile object that contains the resource coordinates
+ * @param key identifies a server location object:
+ * in Terrastore it corresponds to a key, in Mongo to an ObjectId or a remote path
+ * @param type class type definition for casting operation
+ * @return generic object that identifies a remote resource
+ * @throws FileNotFoundException
+ * @throws IOException
+ */
+// public abstract Object get(MyFile myFile, String key, Class type) throws FileNotFoundException, IOException;
+
+ /**
+ * get an object from the cluster
+ * @param myFile object that contains the resource coordinates
+ * @param key identifies a server location object:
+ * in Terrastore it corresponds to a key, in Mongo to an ObjectId or a remote path
+ * @param type class type definition for casting operation
+ * @return generic object that identifies a remote resource
+ * @throws FileNotFoundException
+ * @throws IOException
+ */
+ public abstract Object get(Download download) throws FileNotFoundException, IOException;
+
+
+ /**
+ * put an object on the cluster
+ * @param resource object that contains the resource coordinates
+ * @param bucket remote path or objectId
+ * @param key used for chunk file index or for unlock operation
+ * @throws MongoException
+ * @throws UnknownHostException
+ */
+// public abstract String put(Object resource, String bucket, String key, boolean replace) throws UnknownHostException;
+ public abstract String put(Upload upload) throws FileNotFoundException, IOException;
+
+ /**
+ * get all values contained in a remote bucket (or remote directory)
+ * @param bucket remote path or objectId
+ * @param type class type of myFile object
+ * @return map that contains the objects in the directory
+ * @throws UnknownHostException
+ */
+ public abstract Map getValues(MyFile resource, String bucket, Class< ? 
extends Object> type); + + /** + * delete a remote file + * @param bucket identifies the remote file + * @throws UnknownHostException + */ + public abstract void removeRemoteFile(String bucket, MyFile resource) throws UnknownHostException; + + /** + * delete a remote directory + * @param remoteDir remote Directory path + * @param myFile + * @throws IllegalStateException + * @throws UnknownHostException + * + */ + public abstract void removeDir(String remoteDir, MyFile myFile) throws UnknownHostException; + + + /** + * get the size of the remote file + * @param bucket identifies the remote file path + * @return the size of the remote file + * @throws UnknownHostException + */ + public abstract long getSize(String bucket); + + /** + * lock a remote file + * @param resource object that contains the resource coordinates + * @param serverLocation remote path + * @param type class of resource + * @return the key that permits the object's unlock + * @throws IOException + * @throws Exception + */ +// public abstract String lock(MyFile resource, String serverLocation, +// Class type) throws IOException; + + public abstract String lock(Lock lock) throws Exception; + + /** + * unlock a remote file + * @param resource object that contains the resource coordinates + * @param bucket remote path + * @param key used only for chunk identifications + * @param key4unlock key used for unlock the remote file + * @return key for lock or null + * @throws FileNotFoundException + * @throws UnknownHostException + * @throws MongoException + * @throws Exception + */ +// public abstract String unlock(Object resource, String bucket, String key, +// String key4unlock) throws FileNotFoundException, +// UnknownHostException, MongoException; + + public abstract String unlock(Unlock unlock) throws FileNotFoundException, + UnknownHostException, MongoException, Exception; + + /** + * returns the TTL associated with a remote file + * @param pathServer file remote path + * @return the time of ttl + * @throws UnknownHostException + */ + public abstract long getTTL(String pathServer) throws UnknownHostException; + + /** + * renew the TTL associated with a remote file + * @param resource + * @return the TTL time left + * @throws UnknownHostException + * @throws IllegalAccessException + */ + public abstract long renewTTL(MyFile resource) throws UnknownHostException, IllegalAccessException; + + + /** + * link the destination resource to the source resource. In this operation the payload of the file is the same. The metadata will be changed + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ +// public abstract String link(MyFile resource, String source, String destination) throws UnknownHostException; + public abstract String link(Link link) throws UnknownHostException; + + /** + * copy a remote resource from source path to destination path. In this case the payload will be duplicated + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ +// public abstract String copy(MyFile resource, String source, String destination) throws UnknownHostException; + + + /** + * copy a remote resource from source path to destination path. 
In this case the payload will be duplicated + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ + public abstract String copy(Copy copy) throws UnknownHostException; + + /** + * Move a remote resource from source path to destination path + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ +// public abstract String move(MyFile resource, String source, String destination) throws UnknownHostException; + public abstract String move(Move move) throws UnknownHostException; + + /** + * copy a remote folder from source path to destination path. + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ +// public abstract List copyDir(MyFile resource, String source, String destination) throws UnknownHostException; + public abstract List copyDir(CopyDir copy) throws UnknownHostException; + + /** + * Move a remote folder from source path to destination path + * @param resource resource object + * @param source complete path of the source resource + * @param destination complete path of the destination resource + * @return id of the new resource + * @throws UnknownHostException + */ +// public abstract List moveDir(MyFile resource, String source, String destination) throws UnknownHostException; + public abstract List moveDir(MoveDir move) throws UnknownHostException; + + /** + * Get a generic metadata from a remote file ex: owner, creationDate, link + * @param remotePath remote file path + * @param property property key + * @return property value + * @throws UnknownHostException + */ + public abstract String getFileProperty(String remotePath, String property); + + /** + * Get the number of files in a folder + * @param folderPath: the folder path + * @return the number of files contained in the folder + * @throws UnknownHostException + */ + public abstract long getFolderTotalItems(String folderPath); + + /** + * Get the total Volume in the folder specified by input parameter folderPath + * @param folderPath: the path of the folder + * @return the folder size + * @throws UnknownHostException + */ + public abstract long getFolderTotalVolume(String folderPath); + + /** + * Get the total Volume of files uploaded by a user specified in input parameter user + * @param user: the username + * @return the total + * @throws UnknownHostException + */ + public abstract String getUserTotalVolume(String user); + + /** + * Get the number of files uploaded by a user + * @param user: username + * @return the total + * @throws UnknownHostException + */ + public abstract String getUserTotalItems(String user); + + public abstract boolean isValidId(String id); + + public abstract String getId(String remoteIdentifier, boolean forceCreation); + + public abstract String getField(String remoteIdentifier, String fieldName) throws UnknownHostException ; + + public abstract void close(); + + public abstract void setFileProperty(String remotePath, String propertyField, String propertyValue); + + + public abstract String getRemotePath(String bucket)throws UnknownHostException; + + /** + * @param bucket + * @return + 
*/ + public abstract boolean exist(String bucket); + + /** + * @param bucket remote path or objectId + * @return + */ +// public abstract String duplicateFile(MyFile resource, String bucket); + public abstract String duplicateFile(DuplicateFile duplicate); + +// public String softCopy(MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException{return null;} + public String softCopy(SoftCopy copy) throws UnknownHostException{return null;} + + /** + * @param move + * @return + * @throws UnknownHostException + */ + + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManagerFactory.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManagerFactory.java new file mode 100644 index 0000000..e7bda1b --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/TransportManagerFactory.java @@ -0,0 +1,76 @@ +package org.gcube.contentmanagement.blobstorage.transport; + + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.ServiceLoader; + +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +//import terrastore.client.TerrastoreClient; +/** + * Transport manager factory + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class TransportManagerFactory { + + /** + * Logger for this class + */ +// private static final Logger logger = Logger.getLogger(OperationFactory.class); + final Logger logger = LoggerFactory.getLogger(TransportManagerFactory.class); +// TerrastoreClient client; + String[] server; + String user; + String password; + + public TransportManagerFactory(String server[], String user, String password){ + this.server=server; + this.user=user; + this.password=password; + } + + public TransportManager getTransport(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){ + if (logger.isDebugEnabled()) { + logger.debug("getOperation(String) - start"); + } + return load(backendType, memoryType, dbNames, writeConcern, readConcern); + } + + private TransportManager load(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){ + ServiceLoader loader = ServiceLoader.load(TransportManager.class); + Iterator iterator = loader.iterator(); + List impls = new ArrayList(); + while(iterator.hasNext()) + impls.add(iterator.next()); + int implementationCounted=impls.size(); +// System.out.println("size: "+implementationCounted); + if(implementationCounted==0){ + logger.info(" 0 implementation found. Load default implementation of TransportManager"); + return new MongoOperationManager(server, user, password, memoryType, dbNames, writeConcern, readConcern); + }else if(implementationCounted==1){ + TransportManager tm = impls.get(0); + logger.info("1 implementation of TransportManager found. Load it. "+tm.getName()); + tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern); + return tm; + }else{ + logger.info("found "+implementationCounted+" implementations of TransportManager"); + logger.info("search: "+backendType); + for(TransportManager tm : impls){ + if(tm.getName().equalsIgnoreCase(backendType)){ + logger.info("Found implementation "+backendType); + return tm; + } + } + throw new IllegalStateException("Mismatch Backend Type and RuntimeResource Type. 
The backend type expected is "+backendType); + } + + } + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/BsonOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/BsonOperator.java new file mode 100644 index 0000000..b5c9c7b --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/BsonOperator.java @@ -0,0 +1,49 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend; + +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBObject; +import com.mongodb.gridfs.GridFS; +import com.mongodb.gridfs.GridFSDBFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class BsonOperator { + + private GridFS gfs; +// private GridFSDBFile gfsFile; +// private String dbName; +// private BasicDBObject dbObject; + private Logger logger = LoggerFactory.getLogger(BsonOperator.class); + + public BsonOperator(GridFS gfs){ + this.gfs=gfs; +// this.dbName=dbName; + + } + + protected List getFilesOnFolder(String folderPath) { + BasicDBObject queryFile = new BasicDBObject(); + queryFile.put("dir", java.util.regex.Pattern.compile(folderPath+"*")); + List list=gfs.find(queryFile); + logger.info("retrieveRemoteFileObject found "+list.size()+" objects "); + return list; + } + + protected List getOwnedFiles(String username){ + BasicDBObject queryFile = new BasicDBObject(); + queryFile.put("owner", username); + List list=gfs.find(queryFile); + logger.info("retrieveUsersFileObjectfound "+list.size()+" objects "); + return list; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/CollectionOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/CollectionOperator.java new file mode 100644 index 0000000..0c5bb06 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/CollectionOperator.java @@ -0,0 +1,50 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBCollection; +import com.mongodb.gridfs.GridFS; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class CollectionOperator { + + private GridFS gfs; + private DBCollection collection; + private BasicDBObject dbObject; + + public CollectionOperator(GridFS gfs){ + setGfs(gfs); + } + + public GridFS getGfs() { + return gfs; + } + + public void setGfs(GridFS gfs) { + this.gfs = gfs; + } + + public DBCollection getCollection() { + return collection; + } + + public void setCollection(DBCollection collection) { + this.collection = collection; + } + + public BasicDBObject getDbObject() { + return dbObject; + } + + public void setDbObject(BasicDBObject dbObject) { + this.dbObject = dbObject; + } + + + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/HttpTerrastoreClient.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/HttpTerrastoreClient.java new file mode 100644 index 0000000..7c5cf4f --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/HttpTerrastoreClient.java @@ -0,0 +1,87 @@ +//package org.gcube.contentmanagement.blobstorage.transport.backend; +// +// +//import java.net.UnknownHostException; +//import java.util.Arrays; +//import java.util.Map; +// +//import org.gcube.contentmanagement.blobstorage.resource.MyFile; +//import 
org.gcube.contentmanagement.blobstorage.transport.TransportManager; +// +//import com.mongodb.MongoException; +// +//import terrastore.client.TerrastoreClient; +//import terrastore.client.connection.OrderedHostManager; +//import terrastore.client.connection.resteasy.HTTPConnectionFactory; +// +///** +// * Terrastore Transport layer +// * @author Roberto Cirillo (ISTI - CNR) +// * +// */ +//public class HttpTerrastoreClient extends TransportManager{ +// +// private String[] server; +// private TerrastoreClient client; +// +// public HttpTerrastoreClient(String[] server) { +// client=new TerrastoreClient(new OrderedHostManager(Arrays.asList(server)), new HTTPConnectionFactory()); +// } +// +// @Override +// public Object get(MyFile resource, String key, Class type) { +// Object ret=null; +// if((resource.getPathClient()!=null) && (!resource.getPathClient().isEmpty())) +// ret=client.bucket(resource.getPathClient()).key(key).get(type); +// else +// throw new IllegalArgumentException("Local path not found"); +// return ret; +// } +// +// @Override +// public String put(Object resource, String bucket, String key, boolean replaceOption) { +// //replace option is ignored +// client.bucket(bucket).key(key).put(resource); +// return null; +// } +// +// @Override +// public Map getValues(String bucket, Class type) { +// return client.bucket(bucket).values().get(type); +// } +// +// @Override +// public void clearBucket(String bucket) { +// client.bucket(bucket).clear(); +// +// } +// +// @Override +// public void removeKey(String bucket, String key) { +// client.bucket(bucket).key(key).remove(); +// +// } +// +// @Override +// public Map getValuesPredicate(String bucket, Class< ? extends Object> type, String predicate) { +// return client.bucket(bucket).predicate(predicate).get(type); +// +// } +// +// @Override +// public void removeDir(String remoteDir){} +// +// @Override +// public long getTTL(String pathServer) throws UnknownHostException, +// MongoException { +// throw new IllegalArgumentException("This operation is not compatible with this client"); +// } +// +// @Override +// public long renewTTL(MyFile resource) throws UnknownHostException, +// MongoException { +// throw new IllegalArgumentException("This operation is not compatible with this client"); +//} +// +// +//} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoIOManager.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoIOManager.java new file mode 100644 index 0000000..2df89ca --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoIOManager.java @@ -0,0 +1,1152 @@ +package org.gcube.contentmanagement.blobstorage.transport.backend; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import 
org.gcube.contentmanagement.blobstorage.service.operation.Operation;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.MongoInputStream;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.MongoOutputStream;
+import org.gcube.contentmanagement.blobstorage.transport.backend.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.mongodb.BasicDBObject;
+import com.mongodb.DB;
+import com.mongodb.DBCollection;
+import com.mongodb.DBCursor;
+import com.mongodb.DBObject;
+import com.mongodb.MongoClient;
+import com.mongodb.MongoClientOptions;
+import com.mongodb.MongoCredential;
+import com.mongodb.ReadPreference;
+import com.mongodb.ServerAddress;
+import com.mongodb.WriteConcern;
+import com.mongodb.gridfs.GridFS;
+import com.mongodb.gridfs.GridFSDBFile;
+import com.mongodb.gridfs.GridFSInputFile;
+
+
+public class MongoIOManager {
+
+ private DB db;
+ private String[] server;
+ private MongoClient mongo;
+ private String user;
+ private String password;
+ private Logger logger = LoggerFactory.getLogger(MongoIOManager.class);
+ private GridFS gfs;
+ private MemoryType memoryType;
+ private String dbName;
+ protected String writeConcern;
+ protected String readPreference;
+
+ protected MongoIOManager(String[] server, String user, String password, MemoryType memoryType, String dbName, String writeConcern, String readPreference){
+  setServer(server);
+  setUser(user);
+  setPassword(password);
+  setMemoryType(memoryType);
+  setDbName(dbName);
+  setWriteConcern(writeConcern);
+  setReadPreference(readPreference);
+ }
+
+
+ public DB getConnectionDB(String dbName, boolean readwritePreferences){
+  if(db==null){
+   try{
+
+    List<ServerAddress> srvList=new ArrayList<ServerAddress>();
+    for(String srv : server){
+     srvList.add(new ServerAddress(srv));
+    }
+    if(mongo==null){
+     logger.debug(" open mongo connection ");
+     MongoClientOptions options=null;
+     if ((!Utils.isVarEnv(Costants.NO_SSL_VARIABLE_NAME)) && (Costants.DEFAULT_CONNECTION_MODE.equalsIgnoreCase("SSL"))){
+// to enable SSL, use the following instructions
+// System.setProperty("javax.net.ssl.trustStore", "/usr/local/lib/jvm/jdk1.8.0_151/jre/lib/security/cacerts");
+// System.setProperty("javax.net.ssl.trustStorePassword", "changeit");
+      options=MongoClientOptions.builder().sslEnabled(true).sslInvalidHostNameAllowed(true).connectionsPerHost(Costants.CONNECTION_PER_HOST).connectTimeout(Costants.CONNECT_TIMEOUT).build();
+     }else{
+      if((Costants.DEFAULT_CONNECTION_MODE.equalsIgnoreCase("NO-SSL")) || (Utils.checkVarEnv(Costants.NO_SSL_VARIABLE_NAME).equalsIgnoreCase("TRUE"))){
+// to disable SSL, use the following instruction
+       options=MongoClientOptions.builder().connectionsPerHost(Costants.CONNECTION_PER_HOST).connectTimeout(Costants.CONNECT_TIMEOUT).build();
+      }else{
+       options=MongoClientOptions.builder().sslEnabled(true).sslInvalidHostNameAllowed(true).connectionsPerHost(Costants.CONNECTION_PER_HOST).connectTimeout(Costants.CONNECT_TIMEOUT).build();
+      }
+     }
+     if(((password != null) && (password.length() >0)) && ((user != null) && (user.length() > 0))){
+      MongoCredential credential = MongoCredential.createCredential(user, dbName, password.toCharArray());
+      mongo = new MongoClient(srvList, Arrays.asList(credential), options);
+     }else{
+      mongo = new MongoClient(srvList, options);
+     }
+     logger.debug("Instantiated MongoClient with options: "+mongo.getMongoClientOptions());
"+mongo.getMongoClientOptions()); + } + db = mongo.getDB(dbName); + if((readwritePreferences) && (!(memoryType== MemoryType.VOLATILE)) && (srvList.size()>1)){ + if(writeConcern!=null){ + WriteConcern write=new WriteConcern(Integer.parseInt(writeConcern)); + db.setWriteConcern(write); + }else{ + db.setWriteConcern(Costants.DEFAULT_WRITE_TYPE); + } + if(readPreference!=null){ + ReadPreference read=ReadPreference.valueOf(readPreference); + db.setReadPreference(read); + }else{ + db.setReadPreference(Costants.DEFAULT_READ_PREFERENCE); + } + } + } catch (Exception e) { + close(); + logger.error("Problem to open the DB connection for gridfs file "); + throw new RemoteBackendException("Problem to open the DB connection: "+ e.getMessage()); + } + logger.info("new mongo connection pool opened"); + + } + return db; + } +//PATCHED METHODS + protected ObjectId getRemoteObject(GridFS gfs, MyFile resource, GridFSDBFile f) throws IOException, IllegalAccessError { + ObjectId id; + id=(ObjectId)f.getId(); + String lock=(String)f.get("lock"); + if((lock==null || lock.isEmpty()) || (isTTLUnlocked(f))){ + if((f.containsField("lock")) && (f.get("lock") != null)){ + f.put("lock", null); + f.save(); + } + download(gfs, resource, f, false); + }else{ + checkTTL(f); + } + return id; + } + + public ObjectId getRemoteObject(MyFile resource, GridFSDBFile f) throws IOException, IllegalAccessError { + ObjectId id; + id=(ObjectId)f.getId(); + String lock=(String)f.get("lock"); + if((lock==null || lock.isEmpty()) || (isTTLUnlocked(f))){ + if((f.containsField("lock")) && (f.get("lock") != null)){ + f.put("lock", null); + f.save(); + } + download(resource, f, false); + }else{ + checkTTL(f); + } + return id; + } + /** + * Unused feature + * @param f + * @return + */ + @Deprecated + public boolean isTTLUnlocked(GridFSDBFile f) { + if(f.get("timestamp")==null) + return true; + long timestamp=(Long)f.get("timestamp"); + logger.debug("timestamp found: "+timestamp); + if(timestamp != 0){ + long currentTTL=System.currentTimeMillis() - timestamp; + logger.debug("currentTTL: "+currentTTL+" TTL stabilito: "+Costants.TTL); + if(Costants.TTL < currentTTL){ + f.put("timestamp", null); + return true; + }else{ + return false; + } + }else + return true; + } + + + /** + * @param resource + * @param f + * @param isLock indicates if the file must be locked + * @throws IOException + */ + private void download(GridFS gfs, MyFile resource, GridFSDBFile f, boolean isLock) throws IOException { + OperationDefinition op=resource.getOperationDefinition(); + logger.info("MongoClient download method: "+op.toString()); +// if contains the field link it means that is a link hence I follow ne or more links + while((f !=null ) && (f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null)){ + BasicDBObject query = new BasicDBObject(); + query.put( "_id" , new ObjectId((String)f.get(Costants.LINK_IDENTIFIER)) ); +// query.put( "_id" , f.get(Costants.LINK_IDENTIFIER) ); + f=gfs.findOne( query ); + } + updateCommonFields(f, resource, OPERATION.DOWNLOAD); + f.save(); + if((resource.getLocalPath()!=null) && (!resource.getLocalPath().isEmpty())){ + readByPath(resource, f, isLock, 0); + close(); + }else if(resource.getOutputStream()!=null){ + readByOutputStream(resource, f, isLock, 0); + close(); + } + if((resource!=null) && (resource.getType()!=null) && resource.getType().equalsIgnoreCase("input")){ + readByInputStream(resource, f, isLock, 0); + } + } + + + /** + * @param resource + * @param f + * @param isLock indicates if the file 
+ /**
+ * @param resource
+ * @param f
+ * @param isLock indicates if the file must be locked
+ * @throws IOException
+ */
+ private void download( MyFile resource, GridFSDBFile f, boolean isLock) throws IOException {
+  OperationDefinition op=resource.getOperationDefinition();
+  logger.info("MongoClient download method: "+op.toString());
+// if the descriptor contains the link field, it is a link: follow one or more links
+  while((f !=null ) && (f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null)){
+   BasicDBObject query = new BasicDBObject();
+   query.put( "_id" , new ObjectId((String)f.get(Costants.LINK_IDENTIFIER)) );
+// query.put( "_id" , f.get(Costants.LINK_IDENTIFIER) );
+   f=getGfs().findOne( query );
+  }
+  updateCommonFields(f, resource, OPERATION.DOWNLOAD);
+  f.save();
+  if((resource.getLocalPath()!=null) && (!resource.getLocalPath().isEmpty())){
+   readByPath(resource, f, isLock, 0);
+   close();
+  }else if(resource.getOutputStream()!=null){
+   readByOutputStream(resource, f, isLock, 0);
+   close();
+  }
+  if((resource!=null) && (resource.getType()!=null) && resource.getType().equalsIgnoreCase("input")){
+   readByInputStream(resource, f, isLock, 0);
+  }
+ }
+
+ public void updateCommonFields(DBObject f, MyFile resource, OPERATION op) {
+  f.put("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
+  String owner=resource.getOwner();
+  f.put("lastUser", owner);
+  if(op == null){
+   op=resource.getOperationDefinition().getOperation();
+  }
+  logger.info("set last operation: "+op);
+  f.put("lastOperation", op.toString());
+  if(op.toString().equalsIgnoreCase(OPERATION.MOVE.toString())){
+   f.put("from", resource.getLocalPath());
+  }
+  String address=null;
+  try {
+   address=InetAddress.getLocalHost().getCanonicalHostName().toString();
+   f.put("callerIP", address);
+  } catch (UnknownHostException e) {
+   // the caller IP is optional metadata: ignore if the local host cannot be resolved
+  }
+ }
+
+
+ public ObjectId removeFile(Object resource, String key, boolean replace,
+   ObjectId oldId, GridFSDBFile fold) throws IllegalAccessError,
+   UnknownHostException {
+  logger.info("removing old object with id: "+oldId);
+  //remove old object
+  String oldir=(String)fold.get("dir");
+  if(logger.isDebugEnabled()){
+   logger.debug("old dir found "+oldir);
+  }
+  logger.info("remove the old object if replace is true and the file is not locked");
+  /* 20180409: removed the if-check because a new StorageObject may not have the dir set */
+// if((oldir !=null) &&(oldir.equalsIgnoreCase(((MyFile)resource).getRemoteDir()))){
+  // if the file contains a link, the replace is not allowed
+  if((!replace)){
+   return oldId;
+  }else if((fold.containsField(Costants.COUNT_IDENTIFIER)) && (fold.get(Costants.COUNT_IDENTIFIER)!=null)){
+   close();
+   throw new RemoteBackendException("The file cannot be replaced because it is linked from another remote file");
+  }else{
+   if(logger.isDebugEnabled())
+    logger.debug("remove id: "+oldId);
+   String lock=(String)fold.get("lock");
+   // check if the old file is locked
+   if((lock !=null) && (!lock.isEmpty()) && (!isTTLUnlocked(fold) && (!lock.equalsIgnoreCase(key)))){
+    close();
+    throw new IllegalAccessError("The file is locked");
+   }else{
+    oldId=checkAndRemove(fold, (MyFile)resource);
+   }
+  }
+// }else if(oldir == null){
+// if((!replace) && (oldId!= null)){
+// return oldId;
+// }
+// }
+  return oldId;
+ }
+
+
+ public ObjectId checkAndRemove(GridFSDBFile f, MyFile resource){
+  String idToRemove=f.getId().toString();
+  logger.info("check and remove object with id "+idToRemove+" and path: "+f.get("filename"));
+  ObjectId idFile=null;
+  if(logger.isDebugEnabled())
+   logger.debug("fileFound\t remove file");
+  updateCommonFields(f, resource, OPERATION.REMOVE);
+// check if the file is linked
+  if((f!=null) && (f.containsField(Costants.COUNT_IDENTIFIER)) && (f.get(Costants.COUNT_IDENTIFIER) != null)){
+   // this field is only added for the reporting tool: storage-manager-trigger
+   String filename=(String)f.get("filename");
+   f.put("onScope", filename);
+// remove metadata: dir, filename, name
+   f.put("dir", null);
+   f.put("filename", null);
+   f.put("name", null);
+   f.put("onDeleting", "true");
+   f.save();
+  // check if the file is a link
+  }else if((f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null )){
+   while((f!=null) && (f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null )){
+    // remove f and decrement the linkCount field on the linked object
+    String id=(String)f.get(Costants.LINK_IDENTIFIER);
+    GridFSDBFile fLink=findGFSCollectionObject(new ObjectId(id));
+    int linkCount=(Integer)fLink.get(Costants.COUNT_IDENTIFIER);
+    linkCount--;
+    if(linkCount == 0){
+     // if name, filename and dir are all null, then delete the link object too
+     if((fLink.get("name")==null ) && (fLink.get("filename")==null ) && (fLink.get("dir")==null )){
+      ObjectId idF=(ObjectId) f.getId();
+      idFile=idF;
+      // this field is an advice for the oplog collection reader
+      removeGFSFile(f, idF);
+      if((fLink.containsField(Costants.LINK_IDENTIFIER)) && (fLink.get(Costants.LINK_IDENTIFIER) != null )){
+       //the link is another link
+       id=(String)fLink.get(Costants.LINK_IDENTIFIER);
+       f=findGFSCollectionObject(new ObjectId(id));
+      }else{
+       // the link is not another link
+       f=null;
+      }
+      ObjectId idLink=(ObjectId)fLink.getId();
+      idFile=idLink;
+      removeGFSFile(fLink, idLink);
+     }else{
+      fLink.put(Costants.COUNT_IDENTIFIER, null);
+      fLink.save();
+      ObjectId oId=(ObjectId) f.getId();
+      idFile=oId;
+      removeGFSFile(f, oId);
+      f=null;
+     }
+    }else{
+     fLink.put(Costants.COUNT_IDENTIFIER, linkCount);
+     fLink.save();
+     ObjectId oId=(ObjectId) f.getId();
+     idFile=oId;
+     removeGFSFile(f, oId);
+     f=null;
+    }
+   }
+  }else{
+   logger.info("removing file with id: "+idToRemove);
+   idFile=new ObjectId(idToRemove);
+   removeGFSFile(f, new ObjectId(idToRemove));
+  }
+  return idFile;
+ }
+
+
+
+ /**
+ * @param f mongo gridfs file descriptor
+ * @throws IllegalAccessError
+ */
+ public void checkTTL(GridFSDBFile f) throws IllegalAccessError {
+  if((f.containsField("timestamp")) && (f.get("timestamp")!= null)){
+   long timestamp=(Long)f.get("timestamp");
+   long currentTTL=System.currentTimeMillis() - timestamp;
+   close();
+   throw new IllegalAccessError("the file is locked. currentTTL: "+currentTTL+" TTL bound: "+Costants.TTL);
+  }else{
+   // a locked file without a timestamp: report the lock instead of recursing endlessly
+   close();
+   throw new IllegalAccessError("the file is locked");
+  }
+ }
+
+ public ObjectId createNewFile(Object resource, String bucket, String dir,
+   String name, ObjectId oldId) throws UnknownHostException {
+  ObjectId id;
+  // create new dir
+  if((dir !=null && !dir.isEmpty()) && (bucket !=null && !bucket.isEmpty())){
+   buildDirTree(getMetaDataCollection(null), dir);
+  }
+  //create new file with the specified id
+  GridFSInputFile f2 = writePayload(resource, 0, bucket, name, dir, oldId);
+  id=(ObjectId)f2.getId();
+  logger.info("new file created with id: "+id);
+  return id;
+ }
+
+
+
+ protected GridFSInputFile writePayload(Object resource, int count, String bucket, String name, String dir, ObjectId idFile){
+  GridFSInputFile f2=null;
+  //maybe this close is not needed
+// clean();
+  try{
+   if(((MyFile)resource).getInputStream()!= null){
+    //upload with the client inputStream
+    f2 = writeByInputStream(resource, bucket, name, dir, idFile);
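+    // note: writeByInputStream has already persisted the payload; the save() below
+    // re-saves only the file's metadata document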
+    f2.save();
+
+   }else if(((((MyFile)resource).getType() != null) && (((MyFile)resource).getType().equals("output")))){
+    // upload with outputstream
+    f2 = writeByOutputStream(resource, bucket, name, dir, idFile);
+   }else{
+    // upload by local file path
+    f2 = writeByLocalFilePath(resource, bucket, name, dir, idFile);
+    f2.save();
+
+   }
+   if(logger.isDebugEnabled())
+    logger.debug("Directory: "+dir);
+   Object id=f2.getId();
+   if(logger.isDebugEnabled())
+    logger.debug("ObjectId: "+id);
+
+   // if it is an outputstream don't close
+   if(!((((MyFile)resource).getType() != null) && (((MyFile)resource).getType().equals("output")))){
+    close();
+   }
+  }catch(IOException e1){
+   logger.error("Connection error. "+e1.getMessage());
+   if(count < Costants.CONNECTION_RETRY_THRESHOLD){
+    count++;
+    logger.info(" Retry : #"+count);
+    // keep the result of the retry, otherwise a null reference would be returned
+    f2 = writePayload(resource, count, bucket, name, dir, idFile);
+   }else{
+    logger.error("max number of retries completed ");
+    close();
+    throw new RemoteBackendException(e1);
+   }
+
+  }
+  return f2;
+ }
+
+
+ protected GridFSInputFile writeByLocalFilePath(Object resource,
+   String bucket, String name, String dir, ObjectId idFile)
+   throws IOException {
+  GridFSInputFile f2;
+  if(!(memoryType== MemoryType.VOLATILE))
+   f2 = createGFSFileObject(new File(((MyFile)resource).getLocalPath()), ((MyFile)resource).getWriteConcern(), ((MyFile)resource).getReadPreference());
+  else
+   f2 = createGFSFileObject(new File(((MyFile)resource).getLocalPath()));
+  fillInputFile(resource, bucket, name, dir, f2, idFile);
+  saveGFSFileObject(f2);
+  return f2;
+ }
+
+ protected GridFSInputFile writeByOutputStream(Object resource,
+   String bucket, String name, String dir, ObjectId idFile) throws IOException {
+  GridFSInputFile f2;
+  if(!(memoryType== MemoryType.VOLATILE))
+   f2 = createGFSFileObject(((MyFile)resource).getName(), ((MyFile)resource).getWriteConcern(), ((MyFile)resource).getReadPreference());
+  else
+   f2 = createGFSFileObject(((MyFile)resource).getName());
+  fillInputFile(resource, bucket, name, dir, f2, idFile);
+  ((MyFile)resource).setOutputStream(new MongoOutputStream(mongo, f2.getOutputStream()));
+  return f2;
+ }
+
+ protected GridFSInputFile writeByInputStream(Object resource,
+   String bucket, String name, String dir, ObjectId idFile)
+   throws IOException {
+  GridFSInputFile f2;
+  if(!(memoryType== MemoryType.VOLATILE))
+   f2 = createGFSFileObject(((MyFile)resource).getInputStream(), ((MyFile)resource).getWriteConcern(),((MyFile)resource).getReadPreference());
+  else
+   f2 = createGFSFileObject(((MyFile)resource).getInputStream());
+  fillInputFile(resource, bucket, name, dir, f2, idFile);
+  saveGFSFileObject(f2);
+  ((MyFile)resource).getInputStream().close();
+  ((MyFile)resource).setInputStream(null);
+  return f2;
+ }
+
+
+
+
+
+ protected void fillInputFile(Object resource, String bucket, String name, String dir, GridFSInputFile f2, ObjectId id) {
+  if(id != null)
+   f2.put("_id", new ObjectId(id.toString()));
+  if((bucket != null) &&(bucket.contains("/")))
+   f2.put("filename", bucket);
+  f2.put("type", "file");
+  if(name!= null)
+   f2.put("name", name);
+  if(dir!=null)
+   f2.put("dir", dir);
+  if(((MyFile)resource).getOwner() !=null)
+   f2.put("owner", ((MyFile)resource).getOwner());
+  String mime= ((MyFile)resource).getMimeType();
+  if( mime !=null){
+   f2.put("mimetype", mime);
+  }
+  f2.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
+  updateCommonFields(f2, (MyFile)resource, null);
+ }
+
+
+
+
+ /**
+ * @param gfs gridfs instance
+ * @param query selection query
+ * @param resource the resource descriptor
+ */
+ protected void removeObject(GridFS gfs, BasicDBObject query, MyFile resource){
+  List<GridFSDBFile> list = gfs.find(query);
+  for(Iterator<GridFSDBFile> it=list.iterator(); it.hasNext();){
+   GridFSDBFile f=it.next();
+   if(f!=null){
+    checkAndRemove(f, resource);
+   }else{
+    if(logger.isDebugEnabled())
+     logger.debug("File Not Found");
+   }
+  }
+ }
+
+
+ public void setGenericProperties(MyFile resource, String destination,
+   String dir, GridFSInputFile destinationFile, String name) {
+  updateCommonFields(destinationFile, resource, null);
+  destinationFile.put("filename", destination);
+  destinationFile.put("type", "file");
+  destinationFile.put("name", name);
+  destinationFile.put("dir", dir);
+  destinationFile.put("owner", resource.getOwner());
+  destinationFile.put("mimetype", resource.getMimeType());
+  destinationFile.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
+ }
+
+ public BasicDBObject setGenericMoveProperties(MyFile resource, String filename, String dir,
+   String name, BasicDBObject f) {
+  f.append("filename", filename).append("type", "file").append("name", name).append("dir", dir);
+  return f;
+ }
+
+
+ public ObjectId updateId(ObjectId oldId, ObjectId newId) throws UnknownHostException {
+  logger.info("updating object id from: "+oldId+" to: "+newId);
+  // update chunks
+  updateChunksCollection(oldId, newId);
+  // update fs files collection
+  replaceObjectIDOnMetaCollection(oldId, newId);
+
+  return newId;
+ }
+
+
+ protected void replaceObjectIDOnMetaCollection(ObjectId oldId, ObjectId newId)
+   throws UnknownHostException {
+  BasicDBObject oldIdQuery= new BasicDBObject();
+  oldIdQuery.put("_id", oldId);
+  String collectionName= Costants.DEFAULT_META_COLLECTION;
+  DBCollection dbc=getCollection(null, collectionName);
+  DBObject obj=findCollectionObject(dbc, oldIdQuery);// or multiple objects?
+  obj.put("_id", newId);
+// dbc.dropIndex("_id");
+  if (!(memoryType== MemoryType.VOLATILE)){
+   dbc.remove(oldIdQuery, Costants.DEFAULT_WRITE_TYPE);
+   dbc.insert(obj, Costants.DEFAULT_WRITE_TYPE);
+  }else{
+   dbc.remove(oldIdQuery);
+   dbc.insert(obj);
+  }
+ }
+
+ public void updateChunksCollection(ObjectId oldId, ObjectId newId)
+   throws UnknownHostException {
+  DBCollection dbc;
+  // update fs.chunks collection
+  logger.info("update chunks collection. Change file_id from "+oldId+" to "+newId);
Change file_id from "+oldId+" to "+newId); + BasicDBObject searchQuery=new BasicDBObject().append("files_id", oldId); +// searchQuery.put("files_id", oldId); + BasicDBObject queryNewFileId=new BasicDBObject().append("$set",new BasicDBObject().append("files_id", newId)); + String chunksCollectionName=Costants.DEFAULT_CHUNKS_COLLECTION; + dbc=getCollection(null, chunksCollectionName); +// if (!(memoryType== MemoryType.VOLATILE)) +// dbc.updateMulti(searchQuery, queryNewFileId);//(searchQuery, queryNewFileId, true, true, MongoIOManager.DEFAULT_WRITE_TYPE); +// else + dbc.update(searchQuery, queryNewFileId, true, true); + } + + + + + + +// END PATCHED METHODS + + protected DBCollection getMetaDataCollection() throws UnknownHostException{ + if(db==null) + this.db=getConnectionDB( dbName, true); + return db.getCollection(Costants.DEFAULT_META_COLLECTION); + } + + public DBCollection getMetaDataCollection(DB db) throws UnknownHostException{ + if(db==null){ + this.db=getConnectionDB(dbName, true); + return this.db.getCollection(Costants.DEFAULT_META_COLLECTION); + }else{ + return db.getCollection(Costants.DEFAULT_META_COLLECTION); + } + } + + protected DBCollection getCollection(DB db, String collectionName) throws UnknownHostException{ + if(db==null){ + this.db=getConnectionDB(dbName, false); + return this.db.getCollection(collectionName); + }else{ + return db.getCollection(collectionName); + } + } + + /** + * + * @param serverLocation serverpath or objectid that identifies the resource + * @param retry if true a retry mechanism is performed + * @return resource descriptor + */ + public GridFSDBFile retrieveRemoteDescriptor(String serverLocation, REMOTE_RESOURCE remoteResourceIdentifier, boolean retry){ + logger.info("MongoDB - retrieve object from pathServer: "+serverLocation); + GridFSDBFile f=null; + try{ + GridFS gfs = new GridFS(getConnectionDB( dbName, true)); + //check if the variable remotePath is a valid object id + if(ObjectId.isValid(serverLocation)){ + try{ + BasicDBObject query = new BasicDBObject(); + query.put( "_id" , new ObjectId(serverLocation) ); + f=gfs.findOne( query ); + }catch(Exception e){ + logger.warn("the file "+serverLocation+" is not a valid objectId "+e.getMessage()); + f=null; + } + if((retry && (f==null))){ + int i=0; + while((f== null) && (i retrieveRemoteObjects(BasicDBObject query) throws UnknownHostException { + GridFS gfs=getGfs(); + return gfs.find(query); + } + + public GridFSDBFile retrieveLinkPayload(GridFSDBFile f) throws UnknownHostException { + while((f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null )){ + String id=(String)f.get(Costants.LINK_IDENTIFIER); + f=getGfs().find(new ObjectId(id)); + } + return f; + } + + private GridFSDBFile patchRemoteFilePathVersion1(String serverLocation, + GridFS gfs) { + GridFSDBFile f=null; + String path=serverLocation; + //check if the file is stored by sm v.<2 (patch) + String locationV1=null; + if(serverLocation.contains(Costants.ROOT_PATH_PATCH_V1)){ + locationV1=path.replace(Costants.ROOT_PATH_PATCH_V1, Costants.ROOT_PATH_PATCH_V2); + f=gfs.findOne(locationV1); + if(f== null){ + String locationV1patch=locationV1.substring(1); + f=gfs.findOne(locationV1patch); + } + }else if(serverLocation.contains(Costants.ROOT_PATH_PATCH_V2)){ + locationV1=path.replace(Costants.ROOT_PATH_PATCH_V2, Costants.ROOT_PATH_PATCH_V1); + f=gfs.findOne(locationV1); + if(f== null){ + String locationV1patch=Costants.FILE_SEPARATOR+locationV1; + f=gfs.findOne(locationV1patch); + } + } + return f; + } + + 
+ protected List<GridFSDBFile> patchRemoteDirPathVersion1(String bucket, GridFS gfs,
+   BasicDBObject query, List<GridFSDBFile> list) {
+  List<GridFSDBFile> patchList=null;
+  //Patch incompatibility version 1 - 2
+  if(bucket.contains(Costants.ROOT_PATH_PATCH_V1)){
+   String locationV2=bucket.replace(Costants.ROOT_PATH_PATCH_V1, Costants.ROOT_PATH_PATCH_V2);
+   BasicDBObject queryPatch = new BasicDBObject();
+   queryPatch.put("dir", locationV2);
+   patchList = gfs.find(queryPatch);
+  }else if(bucket.contains(Costants.ROOT_PATH_PATCH_V2)){
+   String locationV1=bucket.replace(Costants.ROOT_PATH_PATCH_V2, Costants.ROOT_PATH_PATCH_V1);
+   BasicDBObject queryPatch = new BasicDBObject();
+   queryPatch.put("dir", locationV1);
+   patchList = gfs.find(queryPatch);
+   String locationV1patch=Costants.FILE_SEPARATOR+locationV1;
+   queryPatch=new BasicDBObject();
+   queryPatch.put("dir", locationV1patch);
+   List<GridFSDBFile> patchList2=gfs.find(queryPatch);
+   if((patchList2 != null) && (!patchList2.isEmpty())){
+    if(patchList != null){
+     patchList.addAll(patchList2);
+    }else{
+     patchList=patchList2;
+    }
+   }
+  }
+  if ((patchList != null) && (!patchList.isEmpty())){
+   list.addAll(patchList);
+  }
+  // END Patch
+  return list;
+ }
+
+ public BasicDBObject findMetaCollectionObject(String source) throws UnknownHostException {
+  DBCollection fileCollection=getConnectionDB(dbName, false).getCollection(Costants.DEFAULT_META_COLLECTION);
+  BasicDBObject query = new BasicDBObject();
+  BasicDBObject obj=null;
+  query.put( "filename" ,source);
+  DBCursor cursor=fileCollection.find(query);
+  if(cursor != null && !cursor.hasNext()){
+   query = new BasicDBObject();
+   query.put( "_id" ,new ObjectId(source));
+   cursor=fileCollection.find(query);
+  }
+  if(cursor.hasNext()){
+   obj=(BasicDBObject) cursor.next();
+   String path=(String)obj.get("filename");
+   logger.debug("path found "+path);
+  }
+  return obj;
+ }
+
+ public DBObject findCollectionObject(DBCollection collection, BasicDBObject query) throws UnknownHostException {
+
+  DBObject obj=null;
+  obj=collection.findOne(query);
+  return obj;
+ }
+
+
+ public DBCursor findCollectionObjects(DBCollection collection, BasicDBObject query) throws UnknownHostException {
+  DBCursor cursor=collection.find(query);
+  return cursor;
+ }
+
+
+
+ protected GridFSDBFile findGFSCollectionObject(ObjectId id){
+  return getGfs().find(id);
+ }
+
+ public DBObject executeQuery(DBCollection fileCollection, BasicDBObject query)
+   throws UnknownHostException {
+  if(fileCollection == null)
+   fileCollection=getMetaDataCollection(getConnectionDB( dbName, false));
+  DBCursor cursor=fileCollection.find(query);
+  if(cursor.hasNext())
+   return cursor.next();
+  return null;
+ }
+
+ /**
+ * @param resource
+ * @param f
+ * @param isLock
+ * @return always null; the remote payload is exposed through the resource's input stream
+ */
+ protected String readByInputStream(MyFile resource, GridFSDBFile f, boolean isLock, int count) {
+  String key=null;
+  resource.setInputStream(new MongoInputStream(mongo, f.getInputStream()));
+  return key;
+ }
+
+ /**
+ * @param resource
+ * @param f
+ * @param isLock
+ * @return always null; the remote payload is written to the resource's output stream
+ * @throws IOException
+ */
+ protected String readByOutputStream(MyFile resource, GridFSDBFile f, boolean isLock, int count)
+   throws IOException {
+  String key=null;
+  f.writeTo(resource.getOutputStream());
+  resource.setOutputStream(null);
+  f.save();
+  return key;
+ }
+ /**
+ * This method reads a remote file and writes it to the local path set on the resource.
+ * It contains a failover (retry) mechanism.
+ *
+ * @param resource
+ * @param f
+ * @param isLock
+ * @return always null; the payload is written to resource.getLocalPath()
+ * @throws IOException
+ */
+ protected String readByPath(MyFile resource, GridFSDBFile f, boolean isLock, int count)
+   throws IOException {
+  String key=null;
+  try{
+   File file=new File(resource.getLocalPath());
+   f.writeTo(file);
+   resource.setLocalPath(null);
+  }catch(IOException e){
+   logger.error("Connection error. "+e.getMessage());
+   if(count < Costants.CONNECTION_RETRY_THRESHOLD){
+    count++;
+    logger.info(" Retry : #"+count);
+    readByPath(resource,f,isLock,count);
+   }else{
+    close();
+    logger.error("max number of retries completed ");
+    throw new RuntimeException(e);
+   }
+  }
+  return key;
+ }
+
+ public GridFSInputFile createGFSFileObject(InputStream is, String writeConcern, String readPreference) throws UnknownHostException {
+  GridFSInputFile f2;
+  GridFS gfs = new GridFS(getConnectionDB( dbName, true));
+  f2 = gfs.createFile(is);
+  return f2;
+ }
+
+ protected GridFSInputFile createGFSFileObject(String name, String writeConcern, String readPreference) throws IOException {
+  GridFSInputFile f2;
+  GridFS gfs = new GridFS(getConnectionDB(dbName, true));
+  f2 = gfs.createFile(name);
+  return f2;
+ }
+
+ protected GridFSInputFile createGFSFileObject(File f, String writeConcern, String readPreference){
+  GridFS gfs = new GridFS(getConnectionDB(dbName, true));
+  GridFSInputFile f2=null;
+  try {
+   f2 = gfs.createFile(f);
+  } catch (IOException e) {
+   logger.error("problem creating the remote file "+f.getAbsolutePath());
+   close();
+   throw new RemoteBackendException(e.getMessage());
+  }
+  return f2;
+ }
+
+ public GridFSInputFile createGFSFileObject(byte[] b, String writeConcern, String readPreference){
+  GridFS gfs = new GridFS(getConnectionDB(dbName, true));
+  GridFSInputFile f2;
+  f2 = gfs.createFile(b);
+  return f2;
+ }
+
+ protected GridFSInputFile createGFSFileObject(InputStream is) throws UnknownHostException {
+  GridFSInputFile f2;
+// GridFS gfs = new GridFS(getDB());
+  GridFS gfs = new GridFS(getConnectionDB(null, false));
+  f2 = gfs.createFile(is);
+  return f2;
+ }
+
+ protected GridFSInputFile createGFSFileObject(String name) throws IOException {
+  GridFSInputFile f2;
+  GridFS gfs = new GridFS(getConnectionDB(null, false));
+  f2 = gfs.createFile(name);
+  return f2;
+ }
+
+ protected GridFSInputFile createGFSFileObject(File f){
+  GridFS gfs = new GridFS(getConnectionDB(null, false));
+  GridFSInputFile f2=null;
+  try {
+   f2 = gfs.createFile(f);
+  } catch (IOException e) {
+   logger.error("problem creating the remote file "+f.getAbsolutePath());
+   close();
+   throw new RemoteBackendException(e.getMessage());
+  }
+  return f2;
+ }
+
+ public GridFSInputFile createGFSFileObject(byte[] b){
+  GridFS gfs = new GridFS(getConnectionDB(null, false));
+  GridFSInputFile f2;
+  f2 = gfs.createFile(b);
+  return f2;
+ }
+
+
+ protected List<GridFSDBFile> getFilesOnFolder( String folderPath) {
+  GridFS gfs = new GridFS(getConnectionDB(dbName, false));
+  BsonOperator bson=new BsonOperator(gfs);
+  List<GridFSDBFile> list=bson.getFilesOnFolder(folderPath);
+  close();
+  return list;
+ }
+
+ protected List<GridFSDBFile> getOwnedFiles(String username){
+  GridFS gfs = new GridFS(getConnectionDB(dbName, false));
+  BsonOperator bson=new BsonOperator(gfs);
+  List<GridFSDBFile> list=bson.getOwnedFiles(username);
+  close();
+  return list;
+ }
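+ // Illustrative example for the method below: given dir "/gcube/home/user/" (a made-up path),
+ // buildDirTree is expected to ensure the entries "/gcube", "/gcube/home" and "/gcube/home/user"
+ // all exist in the metadata collection, creating any missing level from the root down.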
+ /**
+ * Build a directory tree from leaf to root if not already present.
+ * @param meta metadata collection
+ * @param dir directory path
+ */
+ public void buildDirTree(DBCollection meta, String dir) {
+  String[] dirTree=dir.split(Costants.FILE_SEPARATOR);
+  StringBuffer strBuff=new StringBuffer();
+  strBuff.append(Costants.FILE_SEPARATOR);
+  for(int i=1;i<dirTree.length;i++){
+   // minimal sketch of the per-level step, assuming each missing level is inserted as a "dir" entry
+   BasicDBObject dirObj=new BasicDBObject();
+   dirObj.put("type", "dir");
+   dirObj.put("dir", strBuff.toString());
+   dirObj.put("name", dirTree[i]);
+   if(meta.findOne(dirObj)==null){
+    meta.insert(dirObj);
+   }
+   strBuff.append(dirTree[i]+Costants.FILE_SEPARATOR);
+  }
+ }
+
+ // plain accessors, assumed: they are referenced by the constructor
+ public void setServer(String[] server) {
+  this.server = server;
+ }
+
+ public void setUser(String user) {
+  this.user = user;
+ }
+
+ public void setPassword(String password) {
+  this.password = password;
+ }
+
+ protected void printObjectProperties(DBObject obj){
+  // the method name is assumed; it logs every key/value pair of the descriptor
+  Set<String> keys=obj.keySet();
+  for(String key : keys){
+   logger.debug(" "+key+" "+obj.get(key));
+  }
+ }
+
+ protected void saveGFSFileObject(GridFSInputFile f2) {
+  f2.save();
+
+ }
+
+ /**
+ * the old close method
+ */
+ protected void clean() {
+  if(mongo!=null)
+   mongo.close();
+  mongo=null;
+  if(db!=null)
+   db=null;
+ }
+
+ /**
+ * For mongo java driver version 2.14:
+ * a MongoClient instance maintains an internal pool of connections (default size 10),
+ * so it is not necessary to close mongo after every action; the same client can be used for every request.
+ */
+
+ public void close() {
+  if(mongo!=null)
+   mongo.close();
+  logger.info("Mongo has been closed");
+  mongo=null;
+  gfs=null;
+  db=null;
+ }
+
+ public void removeGFSFile(GridFSDBFile f, ObjectId idF){
+  // this field is an advice for the oplog collection reader
+  f.put("onDeleting", "true");
+  f.save();
+  getGfs().remove(idF);
+ }
+
+ protected void replaceGFSFile(GridFSDBFile f, ObjectId idToRemove){
+  // this field is an advice for the oplog collection reader
+  f.put("onDeleting", "true");
+  f.save();
+  getGfs().remove(idToRemove);
+ }
+
+
+ public GridFS getGfs(String dbName, boolean readwritePreferences){
+  if (gfs==null){
+   if(db==null){
+    gfs= new GridFS(getConnectionDB(dbName, readwritePreferences));
+   }else{
+    gfs= new GridFS(db);
+   }
+  }
+  return gfs;
+ }
+
+ public GridFS getGfs(boolean readwritePreferences){
+  return getGfs(dbName, readwritePreferences);
+ }
+
+ public GridFS getGfs(){
+  return getGfs(Costants.DEFAULT_READWRITE_PREFERENCE);
+ }
+
+
+ public MemoryType getMemoryType() {
+  return memoryType;
+ }
+
+
+ public void setMemoryType(MemoryType memoryType) {
+  this.memoryType = memoryType;
+ }
+
+
+ public String getDbName() {
+  return dbName;
+ }
+
+
+ public void setDbName(String dbName) {
+  if ((dbName == null) || (dbName.isEmpty()))
+   this.dbName =Costants.DEFAULT_DB_NAME;
+  else
+   this.dbName = dbName;
+ }
+
+
+ public String getWriteConcern() {
+  return writeConcern;
+ }
+
+
+ public void setWriteConcern(String writeConcern) {
+  this.writeConcern = writeConcern;
+ }
+
+
+ public String getReadPreference() {
+  return readPreference;
+ }
+
+
+ public void setReadPreference(String readPreference) {
+  this.readPreference = readPreference;
+ }
+
+
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoOperationManager.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoOperationManager.java
new file mode 100644
index 0000000..e4d7524
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/MongoOperationManager.java
@@ -0,0 +1,687 @@
+package org.gcube.contentmanagement.blobstorage.transport.backend;
+
+
+import org.bson.types.ObjectId;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
+import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
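+// Design note: MongoOperationManager implements the TransportManager contract by delegating
+// each operation to its Operation object (Download, Upload, Lock, Unlock, ...), executed
+// against the primary and, when configured, the secondary MongoIOManager instance.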
+import org.gcube.contentmanagement.blobstorage.service.operation.*; +import org.gcube.contentmanagement.blobstorage.transport.TransportManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.resource.StorageObject; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import com.mongodb.BasicDBObject; +import com.mongodb.DBCollection; +import com.mongodb.MongoException; +import com.mongodb.gridfs.GridFS; +import com.mongodb.gridfs.GridFSDBFile; +import com.mongodb.gridfs.GridFSInputFile; + +/** + * MongoDB transport layer + * @author Roberto Cirillo (ISTI - CNR) + * + */ +public class MongoOperationManager extends TransportManager{ + /** + * Logger for this class + */ + final Logger logger = LoggerFactory.getLogger(MongoOperationManager.class); +// private MongoClient mongo; + private MongoIOManager mongoPrimaryInstance; + private MongoIOManager mongoSecondaryInstance; + private MemoryType memoryType; + protected static String[] dbNames; + + + public MongoOperationManager(String[] server, String user, String password, MemoryType memoryType, String[] dbNames,String writeConcern, String readConcern){ + initBackend(server,user,password, memoryType,dbNames, writeConcern, readConcern); + } + + + @Override + public void initBackend(String[] server, String user, String pass, MemoryType memoryType , String[] dbNames, String writeConcern, String readConcern) { + try { + this.memoryType=memoryType; + MongoOperationManager.dbNames=dbNames; + logger.debug("check mongo configuration"); + if (dbNames!=null){ + if(dbNames.length==1){ + logger.info("found one mongo db to connect"); + mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, dbNames[0], writeConcern, readConcern); + }else if (dbNames.length== 0){ + + logger.warn("primary db not discovered correctly. Backend will be instantiated with default value"); + mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, null, writeConcern, readConcern); + } else if (dbNames.length== 2){ + logger.info("found two mongo db to connect"); + mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, dbNames[0], writeConcern, readConcern); + mongoSecondaryInstance=getMongoInstance(server, user, pass, memoryType, dbNames[1], writeConcern, readConcern); + }else{ + throw new RuntimeException("Found more than 2 collection on the ServiceEndopint. This case is not managed"); + } + }else{ + logger.debug("primary db not discovered. 
Backend will be instantiated with default value"); + mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, null, writeConcern, readConcern); + } + } catch (UnknownHostException e) { + e.printStackTrace(); + } catch (MongoException e) { + e.printStackTrace(); + } + + } + + private MongoIOManager getMongoInstance(String[] server, String user, String password, MemoryType memoryType, String dbName, String writeConcern, String readPreference) + throws UnknownHostException { + MongoIOManager mongoInstance=new MongoIOManager(server, user, password, memoryType, dbName, writeConcern, readPreference);//MongoIO.getInstance(server, user, password); + mongoInstance.clean(); + DBCollection coll =mongoInstance.getMetaDataCollection();// io.getDB().getCollection("fs.files"); + coll.createIndex(new BasicDBObject("filename", 1)); // create index on "filename", ascending + coll.createIndex(new BasicDBObject("dir", 1)); // create index on "filename", ascending + coll.createIndex(new BasicDBObject("owner", 1)); // create index on "owner", ascending + return mongoInstance; + } + + + + /** + * @param serverLocation can be a path remote on the cluster or a object id + * @throws IOException + */ + @Override + public ObjectId get(Download download) throws IOException { + return download.execute(mongoPrimaryInstance, mongoSecondaryInstance); + } + + +/** + * return the key that permits the object's unlock + * @throws IOException + */ + @Override + public String lock(Lock lock) throws Exception { + return lock.execute(mongoPrimaryInstance, mongoSecondaryInstance, lock.getResource(), lock.getBucket()); + } + + + + @Override + public String put(Upload upload) throws IOException { + return upload.execute(mongoPrimaryInstance, mongoSecondaryInstance, upload.getResource(), upload.getBucket(), upload.isReplaceOption()); + } + + public void close() { + mongoPrimaryInstance.close(); +// mongoSecondaryInstance.close(); + } + + /** + * Unlock the object specified, this method accept the key field for the unlock operation + * @throws FileNotFoundException + * @throws UnknownHostException + */ + @Override + public String unlock(Unlock unlock) throws Exception { + return unlock.execute(mongoPrimaryInstance, mongoSecondaryInstance,unlock.getResource(), unlock.getBucket(), unlock.getKeyUnlock()); + } + + @Override + public Map getValues(MyFile resource, String bucket, Class type){ + Map map=null; + try{ + OperationDefinition op=resource.getOperationDefinition(); + logger.info("MongoClient getValues method: "+op.toString()); +// DB db=mongoPrimaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(), getPrimaryCollectionName(), true); + GridFS gfs = mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true); + if(logger.isDebugEnabled()){ + logger.debug("Mongo get values of dir: "+bucket); + } + + BasicDBObject query = new BasicDBObject(); + query.put("dir", bucket); + List list = gfs.find(query); + // Patch for incompatibility v 1-2 + list=mongoPrimaryInstance.patchRemoteDirPathVersion1(bucket, gfs, query, list); + //end + logger.info("find all object (files/dirs) in the directory "+bucket); + for(Iterator it=list.iterator(); it.hasNext();){ + GridFSDBFile f=(GridFSDBFile)it.next(); + if(map==null){ + map=new HashMap(); + } + StorageObject s_obj=null; + // = null if the object is not contained in a subDirectory + if((f.get("type")==null) || (f.get("type").toString().equalsIgnoreCase("file"))){ + if(logger.isDebugEnabled()) + logger.debug("found object: "+f.get("name")+" type: 
"+f.get("type")); + s_obj=new StorageObject(f.get("name").toString(), "file"); + String owner=(String)f.get("owner"); + if(owner !=null) + s_obj.setOwner(owner); + String creationTime=(String)f.get("creationTime"); + if(creationTime!=null) + s_obj.setCreationTime(creationTime); + s_obj.setId(f.getId().toString()); + }else{ + if(logger.isDebugEnabled()) + logger.debug("found directory: "+f.get("name")+" type: "+f.get("type")); + // check if a empty dir, if it is a empty dir then I remove it + BasicDBObject queryDir = new BasicDBObject(); + queryDir.put("dir", f.get("dir").toString()+f.get("name").toString()); + List listDir = gfs.find(queryDir); + if((listDir != null) && (listDir.size() > 0)) + s_obj=new StorageObject(f.get("name").toString(), "dir"); + else{ + // then the dir not contains subDirectory + //check if it contains subfiles + BasicDBObject queryFile = new BasicDBObject(); + queryFile.put("filename", java.util.regex.Pattern.compile(f.get("dir").toString()+"*")); + logger.info("find all files in the directory "+f.get("name")); + List listFile = gfs.find(queryFile); + logger.info("search completed"); + if((listFile != null) && (listFile.size() > 0)){ + // then it contains subFile. Insert it in the result map + s_obj=new StorageObject(f.get("name").toString(), "dir"); + }else s_obj=null; + } + } + if(s_obj !=null) + map.put(f.get("name").toString(), s_obj); + } + logger.info("search completed"); + }catch(Exception e ){ + close(); + throw new RemoteBackendException("problem to retrieve objects in the folder: "+bucket+" exception message: "+e.getMessage()); + } + close(); + return map; + } + + + @Override + public void removeRemoteFile(String bucket, MyFile resource) throws UnknownHostException{ + logger.info("Check file: "+bucket+ " for removing operation"); + GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, null, true); + if(f!=null){ + mongoPrimaryInstance.checkAndRemove(f, resource); + }else{ + if(logger.isDebugEnabled()) + logger.debug("File Not Found. 
Try to delete by ObjectID"); + if(bucket.length()>23){ + ObjectId id=new ObjectId(bucket); + GridFSDBFile fID=mongoPrimaryInstance.findGFSCollectionObject(id); + if(fID != null){ + mongoPrimaryInstance.checkAndRemove(fID, resource); + if(logger.isInfoEnabled()) + logger.info("object deleted by ID"); + } + } + } + close(); + } + + + @Override + public void removeDir(String remoteDir, MyFile resource){ + ArrayList dirs=new ArrayList(); + dirs.add(remoteDir); + // patch for incompatibility v 1-2 + patchCompatibilityOldLibraryVersion(remoteDir, dirs); + // end patch +// DB db=mongoPrimaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(),getPrimaryCollectionName(), true); + GridFS gfs =mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true);//new GridFS(db); + for(String directory : dirs){ + if(logger.isDebugEnabled()) + logger.debug("Mongo start operation delete bucket: "+directory); + // remove subfolders + if(logger.isDebugEnabled()) + logger.debug("remove subfolders of folder: "+directory); + BasicDBObject query = new BasicDBObject(); + String regex=directory+"*"; + query.put("dir", java.util.regex.Pattern.compile(regex)); + mongoPrimaryInstance.removeObject(gfs, query,resource); + query=new BasicDBObject(); + String[] dir=directory.split(Costants.FILE_SEPARATOR); + StringBuffer parentDir=new StringBuffer(); + for(int i=0;i dirs) { + if((remoteDir.contains(Costants.ROOT_PATH_PATCH_V1)) || (remoteDir.contains(Costants.ROOT_PATH_PATCH_V2))){ + if(remoteDir.contains(Costants.ROOT_PATH_PATCH_V1)){ + String remoteDirV1=remoteDir.replace(Costants.ROOT_PATH_PATCH_V1, Costants.ROOT_PATH_PATCH_V2); + dirs.add(remoteDirV1); + }else{ + String remoteDirV2= remoteDir.replace(Costants.ROOT_PATH_PATCH_V2, Costants.ROOT_PATH_PATCH_V1); + dirs.add(remoteDirV2); + String remoteDirV2patch=Costants.FILE_SEPARATOR+remoteDirV2; + dirs.add(remoteDirV2patch); + } + } + } + + @Override + public long getSize(String remotePath){ + long length=-1; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - get Size for pathServer: "+remotePath); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true); + if(f!=null){ + length=f.getLength(); + } + close(); + return length; + } + + @Override + public boolean exist(String remotePath){ + boolean isPresent=false; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - get Size for pathServer: "+remotePath); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true); + if(f!=null){ + isPresent=true; + } + close(); + return isPresent; + } + + @Override + public long getTTL(String remotePath) throws UnknownHostException{ + long timestamp=-1; + long currentTTL=-1; + long remainsTTL=-1; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - pathServer: "+remotePath); + GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true); + if(f!=null){ + timestamp=(Long)f.get("timestamp"); + if(timestamp > 0){ + currentTTL=System.currentTimeMillis() - timestamp; + remainsTTL=Costants.TTL- currentTTL; + } + + } + close(); + return remainsTTL; + } + + @Override + public long renewTTL(MyFile resource) throws UnknownHostException, IllegalAccessException{ + long ttl=-1; + MyFile file=(MyFile)resource; + REMOTE_RESOURCE remoteResourceIdentifier=file.getOperation().getRemoteResource(); + String key=file.getLockedKey(); + String remotePath=file.getRemotePath(); + GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, remoteResourceIdentifier, true); + 
if(f!=null){ + String lock=(String)f.get("lock"); + //check if the od file is locked + if((lock !=null) && (!lock.isEmpty())){ + String lck=(String)f.get("lock"); + if(lck.equalsIgnoreCase(key)){ + if((f.containsField("countRenew")) && (f.get("countRenew") != null)){ + int count=(Integer)f.get("countRenew"); + if(count < Costants.TTL_RENEW){ + f.put("countRenew", count+1); + }else{ + close(); +// number max of ttl renew operation reached. the operation is blocked + throw new IllegalAccessException("The number max of TTL renew reached. The number max is: "+Costants.TTL_RENEW); + } + }else{ +// first renew operation + f.put("countRenew", 1); + } + f.put("timestamp", System.currentTimeMillis()); + f.save(); + ttl=Costants.TTL; + }else{ + close(); + throw new IllegalAccessError("bad key for unlock"); + } + } + + } + close(); + return ttl; + } + + + /** + * link operation + * + */ + @Override + public String link(Link link) throws UnknownHostException{ + return link.execute(mongoPrimaryInstance, mongoSecondaryInstance, link.getResource(), link.getSourcePath(), link.getDestinationPath()); + } + + + @Override + public String copy(Copy copy) throws UnknownHostException{ + logger.info("CopyFile operation from "+copy.getSourcePath()+" to "+ copy.getDestinationPath()); + return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath()); + } + + + @Override + public String move(Move move) throws UnknownHostException{ + logger.info("MoveFile operation from "+move.getSourcePath()+" to "+ move.getDestinationPath()); + return move.execute(mongoPrimaryInstance, memoryType, move.getResource(), move.getSourcePath(), move.getDestinationPath()); + } + + + + + @Override + public String getName() { + return Costants.DEFAULT_TRANSPORT_MANAGER; + } + + + + @Override + public List copyDir(CopyDir copy) throws UnknownHostException { + return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath()); + } + + + @Override + public List moveDir(MoveDir move) throws UnknownHostException { + return move.execute(mongoPrimaryInstance, move.getResource(), move.getSourcePath(), move.getDestinationPath(), memoryType); + } + + + @Override + public String getFileProperty(String remotePath, String property){ + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true); + if(f!=null){ + String value=(String)f.get(property); + close(); + return value; + }else{ + close(); + throw new RemoteBackendException("remote file not found at path: "+remotePath); + } + } + + @Override + public void setFileProperty(String remotePath, String propertyField, String propertyValue){ + logger.trace("setting field "+propertyField+" with value: "+propertyValue); + try { + updateMetaObject(remotePath, propertyField, propertyValue); + } catch (UnknownHostException e) { + e.printStackTrace(); + throw new RemoteBackendException("UnknownHostException: "+e.getMessage()); + } + } + + /** + * This method perform a query to mongodb in order to add a new property to the metadata object + * @param remoteIdentifier: objectID or remote path of the remote object + * @param propertyField: new field name + * @param propertyValue value of the new field + * @return + * @throws UnknownHostException + */ + private void updateMetaObject(String remoteIdentifier, String propertyField, String propertyValue) + throws UnknownHostException { + BasicDBObject remoteMetaCollectionObject; + logger.debug("find object..."); + remoteMetaCollectionObject = 
mongoPrimaryInstance.findMetaCollectionObject(remoteIdentifier); + if(remoteMetaCollectionObject!=null){ + logger.debug("object found"); + remoteMetaCollectionObject.put(propertyField, propertyValue); + logger.info("set query field: "+propertyField+" with value: "+propertyValue); + BasicDBObject updateQuery= new BasicDBObject(); + updateQuery.put("$set", remoteMetaCollectionObject); + // retrieve original object + BasicDBObject querySourceObject = getQuery(remoteIdentifier); + //getCollection + logger.debug("get Collection "); + DBCollection metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(getPrimaryCollectionName(), false)); + //update field + logger.debug("update Collection "); + if (!(memoryType== MemoryType.VOLATILE)) + metaCollectionInstance.update(querySourceObject, updateQuery, false, true, Costants.DEFAULT_WRITE_TYPE); + else + metaCollectionInstance.update(querySourceObject, updateQuery, false, true); + logger.info("update completed"); + close(); + }else{ + logger.debug("object not found"); + close(); + throw new RemoteBackendException("remote file not found at path: "+remoteIdentifier); + } + } + + /** + * + * @param remoteIdentifier objectID or remote path of the remote object + * @return the BasicDBObject of the remote object + */ + private BasicDBObject getQuery(String remoteIdentifier) { + BasicDBObject querySourceObject = new BasicDBObject(); + logger.debug("check identifier object: "+remoteIdentifier); + if(ObjectId.isValid(remoteIdentifier)){ + logger.debug("object is a valid id"); + querySourceObject.put( "_id" , new ObjectId(remoteIdentifier)); + }else{ + logger.debug("object is a remotepath"); + querySourceObject.put( "filename" , remoteIdentifier); + } + return querySourceObject; + } + + @Override + public long getFolderTotalItems(String folderPath){ + logger.debug("getFolderTotalItems for folder "+folderPath); + long totalItems=0; + try{ + List list= mongoPrimaryInstance.getFilesOnFolder(folderPath); + totalItems=getCount(list); + logger.info("getFolderTotalItems found "+list.size()+" objects for folder "+folderPath); + }catch(Exception e ){ + close(); + throw new RemoteBackendException(e.getMessage()); + } + return totalItems; + } + + @Override + public long getFolderTotalVolume(String folderPath){ + logger.debug("getFolderTotalVolume for folder "+folderPath); + long totalVolume=0; + try{ + List list= mongoPrimaryInstance.getFilesOnFolder(folderPath); + totalVolume=getVolume(list); + logger.info("getFolderTotalVolume "+totalVolume+" for folder "+folderPath); + }catch(Exception e ){ + close(); + throw new RemoteBackendException(e.getMessage()); + } + return totalVolume; + } + + @Override + public String getUserTotalVolume(String user){ + logger.debug("getUserTotalVolume for folder "+user); + long volume=0; + try{ + List list= mongoPrimaryInstance.getOwnedFiles(user); + volume=getVolume(list); + logger.info("getUserTotalVolume found "+volume+" for user "+user); + }catch(Exception e ){ + close(); + throw new RemoteBackendException(e.getMessage()); + } + return ""+volume; + } + + @Override + public String getUserTotalItems(String user){ + logger.debug("getUserTotalItems for folder "+user); + long count=0; + try{ + List list= mongoPrimaryInstance.getOwnedFiles(user); + logger.info("getUserTotalItems found "+list.size()+" objects for user "+user); + count=getCount(list); + }catch(Exception e ){ + close(); + throw new RemoteBackendException(e.getMessage()); + } + return ""+count; + } + + + + @Override + public String 
getId(String path, boolean forceCreation){ + ObjectId id=null; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - pathServer: "+path); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(path, null, true); + if(f!=null){ + id=(ObjectId)f.getId(); + }else if(forceCreation){ + logger.warn("The remote file doesn't exist. An empty file will be created"); + // if the file doesn't exist. An empty file will be created + id = forceCreation(path, id); + }else{ + close(); + throw new RemoteBackendException("the file "+path+" is not present on storage. The uri is not created "); + } + close(); + return id.toString(); + } + + + private long getCount(List list){ + return list.size(); + } + + + private long getVolume(List list){ + long partialVolume=0; + for(GridFSDBFile f : list){ + long fileVolume=f.getLength(); + partialVolume=partialVolume+fileVolume; + } + return partialVolume; + } + + + private ObjectId forceCreation(String path, ObjectId id) { + if(!ObjectId.isValid(path)){ + byte[] data=new byte[1]; + GridFSInputFile f2 = null; + if (path.startsWith("/VOLATILE")){ + f2=mongoPrimaryInstance.createGFSFileObject(data);//gfs.createFile(data); + }else{ + f2=mongoPrimaryInstance.createGFSFileObject(data, null, null);//gfs.createFile(data); + } + + int indexName=path.lastIndexOf(Costants.FILE_SEPARATOR); + String name=path.substring(indexName+1); + String dir=path.substring(0, indexName+1); + f2.put("filename", path); + f2.put("name", name); + f2.put("dir", dir); + id=(ObjectId)f2.getId(); + f2.save(); + close(); + }else{ + logger.error("Cannot force creation of smp uri without a remote path. The input parameter is not a remotePath valid: "+path); + close(); + throw new RemoteBackendException("The uri is not created. Cannot force creation of smp uri without a remote path. 
The input parameter is not a remotePath: "+path); + } + return id; + } + + @Override + public boolean isValidId(String id){ + return ObjectId.isValid(id); + } + + @Override + public String getRemotePath(String bucket) throws UnknownHostException{ + if(!ObjectId.isValid(bucket)) + throw new RuntimeException("The following id is not valid: "+bucket); + String path=null; + path=getField(bucket, "filename"); + return path; + } + + @Override + public String getField(String remoteIdentifier, String fieldName) throws UnknownHostException { + String fieldValue=null; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - pathServer: "+remoteIdentifier); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remoteIdentifier, null, true); + if(f!=null){ + fieldValue=f.get(fieldName).toString(); + } + close(); + return fieldValue; + } + + public static String getPrimaryCollectionName(){ + if ((dbNames != null) && (dbNames.length>0)) + return dbNames[0]; + else + return null; + } + + protected static String getSecondaryCollectionName(){ + if ((dbNames != null) && (dbNames.length>1)) + return dbNames[1]; + else + return null; + } + + + /** + * Create a new file with the same remotepath and the suffix -dpl + */ + @Override + public String duplicateFile(DuplicateFile duplicate) { + return duplicate.execute(mongoPrimaryInstance); + } + + + @Override + public String softCopy(SoftCopy copy) throws UnknownHostException{ + return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath()); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/RemoteBackendException.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/RemoteBackendException.java new file mode 100644 index 0000000..6702b42 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/RemoteBackendException.java @@ -0,0 +1,28 @@ +package org.gcube.contentmanagement.blobstorage.transport.backend; + +public class RemoteBackendException extends RuntimeException { + + + private static final long serialVersionUID = 1L; + + public RemoteBackendException() + { + super("Remote backend problem: impossible to complete operation "); + } + + public RemoteBackendException(String msg) + { + super(" Remote backend problem: impossible to complete operation "+msg ); + } + + + public RemoteBackendException(Throwable cause) + { + super(" Remote backend problem: impossible to complete operation "+cause ); + } + + public RemoteBackendException(String msg , Throwable cause){ + super(msg, cause); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyDirOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyDirOperator.java new file mode 100644 index 0000000..1f57588 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyDirOperator.java @@ -0,0 +1,103 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.InputStream; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import org.gcube.contentmanagement.blobstorage.service.operation.CopyDir; 
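// Note on RemoteBackendException (defined just above): it extends RuntimeException,
// so the operators in this package do not declare it and callers must catch it
// explicitly to recover. A minimal, hypothetical caller-side sketch follows;
// doStorageOperation() is a placeholder, not part of this API:
//
//   try {
//       doStorageOperation();
//   } catch (RemoteBackendException e) {
//       // the message already carries the "Remote backend problem: ..." prefix
//       logger.error("storage operation failed", e);
//   }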
+import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBObject; +import com.mongodb.DB; +import com.mongodb.gridfs.GridFS; +import com.mongodb.gridfs.GridFSDBFile; +import com.mongodb.gridfs.GridFSInputFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class CopyDirOperator extends CopyDir { + + Logger logger=LoggerFactory.getLogger(CopyDirOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public CopyDirOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.CopyDir#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String, java.lang.String) + */ + @Override + public List execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) + throws UnknownHostException { + String source=sourcePath; + source = appendFileSeparator(source); + String destination=destinationPath; + destination = appendFileSeparator(destination); + String parentFolder=extractParent(source); + String destinationId=null; + List idList=null; + logger.debug("copyDir operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){ + DB db = mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true);// getDB(resource); + GridFS gfs = mongoPrimaryInstance.getGfs(); +//// create query for dir field + BasicDBObject query = new BasicDBObject(); + query.put( "dir" , new BasicDBObject("$regex", source+"*")); + List folder = gfs.find(query); + if(folder!=null){ + idList=new ArrayList(folder.size()); + for(GridFSDBFile f : folder){ + if(f.get("type").equals("file")){ + String oldFilename=(String)f.get("filename"); + String oldDir=(String)f.get("dir"); + f=mongoPrimaryInstance.retrieveLinkPayload(f); + InputStream is= f.getInputStream(); + int relativePathIndex=source.length(); + String relativeDirTree=parentFolder+Costants.FILE_SEPARATOR+oldDir.substring(relativePathIndex); + String relativePath=parentFolder+Costants.FILE_SEPARATOR+oldFilename.substring(relativePathIndex); + String filename=destination+relativePath; + String dir=destination+relativeDirTree; + GridFSInputFile destinationFile=gfs.createFile(is); + destinationFile.put("filename", filename); + destinationFile.put("type", "file"); + destinationFile.put("dir", dir); + mongoPrimaryInstance.updateCommonFields(destinationFile, resource, OPERATION.COPY_DIR); + idList.add(destinationFile.getId().toString()); + if(logger.isDebugEnabled()) + logger.debug("ObjectId: "+destinationId); + mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(db), dir); + destinationFile.save(); + } + } + 
} + mongoPrimaryInstance.close(); + } + return idList; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyOperator.java new file mode 100644 index 0000000..69b5d3f --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/CopyOperator.java @@ -0,0 +1,129 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.InputStream; +import java.net.UnknownHostException; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.operation.Copy; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.gridfs.GridFSDBFile; +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class CopyOperator extends Copy { + + + final Logger logger=LoggerFactory.getLogger(CopyOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public CopyOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.Copy#execute() + */ + @Override +// public String execute(MongoIO mongoPrimaryInstance) throws UnknownHostException { + public String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException { + String source=sourcePath; + String destination=destinationPath; + String dir=((MyFile)resource).getRemoteDir(); + String originalDir=((MyFile)resource).getLocalDir(); + logger.debug("from directory: "+originalDir+" to directory: "+dir); + String name=((MyFile)resource).getName(); + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); + ObjectId destinationId=null; + logger.debug("copy operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){ + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, true); + if(f != null){ +// if it is a copy of a hardLink, retrieve and copy the payload associated with the link + f = mongoPrimaryInstance.retrieveLinkPayload(f); + InputStream is= f.getInputStream(); + resource.setInputStream(is); + // check if the destination is a dir or a file and if the destination exists + GridFSDBFile dest = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, false);//gfs.findOne(destination); +// GridFSInputFile destinationFile=mongoPrimaryInstance.createGFSFileObject(is, 
resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(is); + ObjectId removedId=null; + if (dest != null){ + //overwrite the file + // removedId=mongoPrimaryInstance.checkAndRemove(f, resource); + // the third parameter to true replace the file + removedId = mongoPrimaryInstance.removeFile(resource, null, resource.isReplace(), null, dest); + if((remoteResourceIdentifier != null) && ((remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID))) && (removedId != null)){ + destinationId = mongoPrimaryInstance.createNewFile(resource, null, dir, name, removedId); + }else{ + destinationId = mongoPrimaryInstance.createNewFile(resource, destination, dir , name, removedId); + } + if(logger.isDebugEnabled()) + logger.debug("ObjectId: "+destinationId); + mongoPrimaryInstance.close(); + }else{ + destinationId = mongoPrimaryInstance.createNewFile(resource, destination, dir , name, null); + mongoPrimaryInstance.close(); + } + }else{ + mongoPrimaryInstance.close(); + throw new RemoteBackendException(" the source path is wrong. There isn't a file at "+source); + } + } else throw new RemoteBackendException("Invalid arguments: source "+source+" destination "+destination); + return destinationId.toString(); + + } + + + public String safePut(MongoIOManager mongoPrimaryInstance, Object resource, String bucket, String key, boolean replace) throws UnknownHostException{ + OperationDefinition op=((MyFile)resource).getOperationDefinition(); + REMOTE_RESOURCE remoteResourceIdentifier=((MyFile)resource).getOperation().getRemoteResource(); + logger.info("MongoClient put method: "+op.toString()); + String dir=((MyFile)resource).getRemoteDir(); + String name=((MyFile)resource).getName(); + ObjectId id=null; + ObjectId oldId=null; +// id of the remote file if present + GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, remoteResourceIdentifier, false); + if(fold != null){ +// if a file is present + logger.info("a file is already present at: "+bucket); +// keep old id + oldId=(ObjectId) fold.getId(); + logger.info("get old id: "+oldId); +// create new file + id = mongoPrimaryInstance.createNewFile(resource, bucket, dir, name, null); +// remove old file + oldId = mongoPrimaryInstance.removeFile(resource, key, replace, oldId, fold); +// oldId = removeOldMetadataFile(oldId); +// update the id to the new file + id=mongoPrimaryInstance.updateId(id, oldId); + + }else{ +// create new file + id = mongoPrimaryInstance.createNewFile(resource, bucket, dir, name, oldId); + } + return id.toString(); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DownloadOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DownloadOperator.java new file mode 100644 index 0000000..2c7621c --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DownloadOperator.java @@ -0,0 +1,75 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.FileNotFoundException; +import java.io.IOException; +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.operation.Download; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import 
org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.gridfs.GridFSDBFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class DownloadOperator extends Download { + + final Logger logger=LoggerFactory.getLogger(DownloadOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public DownloadOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.Download#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO) + */ + @Override + public ObjectId execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException { + OperationDefinition op=resource.getOperationDefinition(); + logger.info("MongoClient get method: "+op.toString()); + mongoPrimaryInstance.getConnectionDB( MongoOperationManager.getPrimaryCollectionName(), true);// getDB(resource); +// GridFS gfs=mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true); + //if the operation is required by id we avoid to check if the object is available by path + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); + logger.info("operation required by "+remoteResourceIdentifier); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, false); //previous value was true + ObjectId id=null; + if(f!=null){ + id = mongoPrimaryInstance.getRemoteObject(resource, f); + //check if the file is present on another db in the same backend + }else if(mongoSecondaryInstance!=null){ +// DB secondaryDb =mongoSecondaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(), getSecondaryCollectionName(), true);// getDB(resource); +// GridFS secondaryGfs = mongoSecondaryInstance.getGfs(getSecondaryCollectionName(), true); + GridFSDBFile secondaryF = mongoSecondaryInstance.retrieveRemoteDescriptor(getRemotePath(), remoteResourceIdentifier, true); + if(secondaryF !=null){ + id = mongoSecondaryInstance.getRemoteObject( resource, secondaryF); + } + }else{ + mongoPrimaryInstance.close(); + throw new FileNotFoundException("REMOTE FILE NOT FOUND: WRONG PATH OR WRONG OBJECT ID"); + } + return id; + + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DuplicateOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DuplicateOperator.java new file mode 100644 index 0000000..463a5fb --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/DuplicateOperator.java @@ -0,0 +1,74 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.IOException; + +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.operation.DuplicateFile; +import 
org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.gridfs.GridFSDBFile; +import com.mongodb.gridfs.GridFSInputFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class DuplicateOperator extends DuplicateFile { + + Logger logger=LoggerFactory.getLogger(DuplicateOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public DuplicateOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.DuplicateFile#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO) + */ + @Override + public String execute(MongoIOManager mongoPrimaryInstance){ + String destination=((MyFile)getResource()).getRemotePath()+Costants.DUPLICATE_SUFFIX; + String dir=((MyFile)getResource()).getRemoteDir(); +// String name=((MyFile)getResource()).getName(); + if((getBucket() != null) && (!getBucket().isEmpty())){ + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, true); + GridFSInputFile destinationFile=null; + try { +// GridFSInputFile f2 = mongoPrimaryInstance.createGFSFileObject(f.getFilename()); + destinationFile=mongoPrimaryInstance.createGFSFileObject(f.getInputStream(), resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(is); + mongoPrimaryInstance.setGenericProperties(getResource(), destination, dir, + destinationFile, destination.substring(destination.lastIndexOf(Costants.FILE_SEPARATOR)+1)); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + String destinationId=destinationFile.getId().toString(); + destinationFile.save(); + if(logger.isDebugEnabled()) + logger.debug("ObjectId: "+destinationId); + mongoPrimaryInstance.close(); + return destinationId; + } throw new RemoteBackendException("argument cannot be null for duplicate operation"); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LinkOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LinkOperator.java new file mode 100644 index 0000000..1b58ad8 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LinkOperator.java @@ -0,0 +1,131 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.net.UnknownHostException; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import 
org.gcube.contentmanagement.blobstorage.service.operation.Link; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.service.operation.Operation; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.gridfs.GridFSDBFile; +import com.mongodb.gridfs.GridFSInputFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class LinkOperator extends Link { + + + Logger logger=LoggerFactory.getLogger(LinkOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public LinkOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.Link#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String) + */ + @Override + public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException { + boolean replace=true; + String source=sourcePath; + String destination=destinationPath; + String dir=resource.getRemoteDir(); + String name=resource.getName(); + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); + String destinationId=null; + String sourceId=null; + logger.debug("link operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){ + GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, false); + if(f != null){ + int count=1; + if((f.containsField(Costants.COUNT_IDENTIFIER)) && ((f.get(Costants.COUNT_IDENTIFIER) != null))){ + count=(Integer)f.get(Costants.COUNT_IDENTIFIER); + count++; + } + f.put(Costants.COUNT_IDENTIFIER, count); + mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.LINK); + sourceId=f.getId().toString(); + f.save(); + }else{ + mongoPrimaryInstance.close(); + throw new IllegalArgumentException(" source remote file not found at: "+source); + } + // check if the destination file exists +// GridFSDBFile fold = gfs.findOne(destinationPath); + GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(destinationPath, remoteResourceIdentifier, false); + if(fold != null){ + String oldir=(String)fold.get("dir"); + if(logger.isDebugEnabled()) + logger.debug("old dir found "+oldir); + if((oldir.equalsIgnoreCase(((MyFile)resource).getRemoteDir()))){ + ObjectId oldId=(ObjectId) fold.getId(); + if(!replace){ + return oldId.toString(); + }else{ + if(logger.isDebugEnabled()) + logger.debug("remove id: "+oldId); + String lock=(String)fold.get("lock"); + //check if the od file is locked + if((lock !=null) && (!lock.isEmpty()) && (!mongoPrimaryInstance.isTTLUnlocked(fold))){ + mongoPrimaryInstance.close(); + throw new IllegalAccessError("The 
file is locked"); + }else{ + //remove old file + mongoPrimaryInstance.removeGFSFile(fold, oldId); + } + } + } + } + // create destination file + GridFSInputFile destinationFile=null; + //create new file + byte[] data=new byte[1]; + if (resource.getGcubeMemoryType()== MemoryType.VOLATILE){ + destinationFile = mongoPrimaryInstance.createGFSFileObject(data);//gfs.createFile(data); + }else{ + destinationFile = mongoPrimaryInstance.createGFSFileObject(data, resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(data); + } + if(logger.isDebugEnabled()) + logger.debug("Directory: "+dir); + mongoPrimaryInstance.setGenericProperties(resource, destinationPath, dir, + destinationFile, name); + destinationFile.put(Costants.LINK_IDENTIFIER, sourceId); + destinationId=destinationFile.getId().toString(); + if(logger.isDebugEnabled()) + logger.debug("ObjectId: "+destinationId); + mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(null), dir); + destinationFile.save(); + mongoPrimaryInstance.close(); + }else{ + mongoPrimaryInstance.close(); + throw new IllegalArgumentException(" invalid argument: source: "+source+" dest: "+destination+" the values must be not null and not empty"); + } + return destinationId.toString(); + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LockOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LockOperator.java new file mode 100644 index 0000000..5c6e5bb --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/LockOperator.java @@ -0,0 +1,86 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.FileNotFoundException; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.operation.Download; +import org.gcube.contentmanagement.blobstorage.service.operation.Lock; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.gridfs.GridFSDBFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class LockOperator extends Lock { + + final Logger logger=LoggerFactory.getLogger(LockOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public LockOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.Lock#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO) + */ + @Override + public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String serverLocation) throws Exception { + 
OperationDefinition op=resource.getOperationDefinition(); + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); +// if((resource.getLocalPath()!= null) && (!resource.getLocalPath().isEmpty())){ +// resource.setOperation(OPERATION.DOWNLOAD); +// Download download= new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames()); +// setDownload(download); +// get(getDownload(), resource, true); +// resource.setOperation(op); +// mongoPrimaryInstance.close(); +// mongoPrimaryInstance=null; +// } + logger.info("MongoClient lock method: "+op.toString()); + String key=null; + if(logger.isDebugEnabled()) + logger.debug("MongoDB - pathServer: "+resource.getAbsoluteRemotePath()); + GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(resource.getAbsoluteRemotePath(), remoteResourceIdentifier, true); + if(f!=null){ + //timestamp is used for compare to ttl of a file lock. + String lock=(String)f.get("lock"); + if((lock==null || lock.isEmpty()) || (mongoPrimaryInstance.isTTLUnlocked(f))){ + key=f.getId()+""+System.currentTimeMillis(); + f.put("lock", key); + f.put("timestamp", System.currentTimeMillis()); + mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.LOCK); + f.save(); + }else{ + mongoPrimaryInstance.checkTTL(f); + } + + }else{ + mongoPrimaryInstance.close(); + throw new FileNotFoundException("REMOTE FILE NOT FOUND: WRONG PATH OR WRONG OBJECT ID"); + } + return key; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveDirOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveDirOperator.java new file mode 100644 index 0000000..715da42 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveDirOperator.java @@ -0,0 +1,110 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.service.operation.MoveDir; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBObject; +import com.mongodb.DB; +import com.mongodb.DBCollection; +import com.mongodb.DBCursor; +import com.mongodb.DBObject; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class MoveDirOperator extends MoveDir { + + Logger logger=LoggerFactory.getLogger(MoveDirOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public MoveDirOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, 
user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.MoveDir#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String, java.lang.String) + */ + @Override + public List execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, + String destinationPath, MemoryType memoryType) throws UnknownHostException { + String source=sourcePath; + source = appendFileSeparator(source); + String parentFolder=extractParent(source); + String destination=destinationPath; + destination = appendFileSeparator(destination); + List idList=null; + logger.debug("moveDir operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){ + DB db=mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true); +// GridFS meta = new GridFS(db); + DBCollection meta=mongoPrimaryInstance.getMetaDataCollection(db); +// create query for dir field + BasicDBObject query = new BasicDBObject(); + query.put( "dir" , new BasicDBObject("$regex", source+"*")); + DBCursor folderCursor = meta.find(query); + if((folderCursor !=null)){ + idList=new ArrayList(); + while(folderCursor.hasNext()){//GridFSDBFile f : folder){ + DBObject f=folderCursor.next(); + if(f.get("type").equals("file")){ + String oldFilename=(String)f.get("filename"); + String oldDir=(String)f.get("dir"); + int relativePathIndex=source.length(); + String relativeDirTree=parentFolder+Costants.FILE_SEPARATOR+oldDir.substring(relativePathIndex); + String relativePath=parentFolder+Costants.FILE_SEPARATOR+oldFilename.substring(relativePathIndex); + String filename=destination+relativePath; + String dir=destination+relativeDirTree; + f.put("filename", filename); + f.put("dir", dir); + mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.MOVE_DIR); + String id=f.get("_id").toString(); + idList.add(id); + query = new BasicDBObject(); + query.put( "_id" , new ObjectId(id)); + if(!(memoryType== MemoryType.VOLATILE)) + meta.update(query, f, true, false, Costants.DEFAULT_WRITE_TYPE); + else + meta.update(query, f, true, false); +// meta.update(query, f, true, true); + mongoPrimaryInstance.buildDirTree(meta, dir); + } + } + } + }else{ + mongoPrimaryInstance.close(); + throw new IllegalArgumentException("parameters not completed, source: "+source+", destination: "+destination); + } + mongoPrimaryInstance.close(); + return idList; + + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveOperator.java new file mode 100644 index 0000000..a2727ac --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/MoveOperator.java @@ -0,0 +1,201 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import 
org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.service.operation.Move; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBCollection; +import com.mongodb.DBObject; +import com.mongodb.gridfs.GridFS; +import com.mongodb.gridfs.GridFSDBFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class MoveOperator extends Move { + + Logger logger=LoggerFactory.getLogger(MoveOperator.class); + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public MoveOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + /* (non-Javadoc) + * @see org.gcube.contentmanagement.blobstorage.service.operation.Move#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO) + */ + @Override +// public String execute(MongoIO mongoPrimaryInstance, MemoryType memoryType) throws UnknownHostException { + public String execute(MongoIOManager mongoPrimaryInstance, MemoryType memoryType, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException { + String source=sourcePath; + String destination=destinationPath; + resource.setLocalPath(sourcePath); + String dir=((MyFile)resource).getRemoteDir(); + String name=((MyFile)resource).getName(); + String destinationId=null; + String sourceId=null; + logger.info("move operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + logger.debug("MOVE OPERATION operation defined: "+resource.getOperationDefinition().getOperation()); + if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){ + BasicDBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source); +//check if the file exist in the destination path, if it exist then it will be deleted + if(sourcePathMetaCollection != null){ + sourceId=sourcePathMetaCollection.get("_id").toString(); + sourcePathMetaCollection=setCommonFields(sourcePathMetaCollection, resource, OPERATION.MOVE); +// updateCommonFields(sourcePathMetaCollection, resource); + BasicDBObject queryDestPath = new BasicDBObject(); + queryDestPath.put( "filename" , destinationPath); + DBCollection metaCollectionInstance=null; + if(!(memoryType== MemoryType.VOLATILE)) + metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true)); + else + metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), false)); + + DBObject destPathMetaCollection= 
mongoPrimaryInstance.executeQuery(metaCollectionInstance, queryDestPath); + // retrieve original object + BasicDBObject querySourcePath = new BasicDBObject(); + querySourcePath.put( "filename" , sourcePath); + //update common fields + BasicDBObject updateQuery= new BasicDBObject(); + updateQuery.put("$set", sourcePathMetaCollection); + if(!(memoryType== MemoryType.VOLATILE)) + metaCollectionInstance.update(querySourcePath, updateQuery, false, true, Costants.DEFAULT_WRITE_TYPE); + else + metaCollectionInstance.update(querySourcePath, updateQuery, false, true); + if(destPathMetaCollection != null) + destinationId=destPathMetaCollection.get("_id").toString(); + if((destPathMetaCollection!=null) && (destinationId != null) && (!destinationId.equals(sourceId))){ + mongoPrimaryInstance.printObject(destPathMetaCollection); + // if exist, keep id (it need a replace) + destinationId=destPathMetaCollection.get("_id").toString(); + logger.info("file in destination path already present with id : "+destinationId); + //remove old one +// GridFS gfs = new GridFS(mongoPrimaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(), getPrimaryCollectionName(), true)); + GridFS gfs = mongoPrimaryInstance.getGfs(MongoOperationManager.getPrimaryCollectionName(), true); + GridFSDBFile fNewFSPath = gfs.findOne(queryDestPath); + mongoPrimaryInstance.checkAndRemove(fNewFSPath, resource); + // print + logger.debug("Changing filename metadata from:"+sourcePathMetaCollection.get("filename")+"\n to: "+destinationPath); + logger.debug("original objects:\n "); + logger.debug("source object: "); + mongoPrimaryInstance.printObject(sourcePathMetaCollection); + logger.info("destination object: "); + mongoPrimaryInstance.printObject(destPathMetaCollection); + // update fields + mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB( MongoOperationManager.getPrimaryCollectionName(), true)), dir); + sourcePathMetaCollection= new BasicDBObject(); + sourcePathMetaCollection.put("$set", new BasicDBObject().append("dir", dir).append("filename", destinationPath).append("name", name).append("owner", ((MyFile)resource).getOwner())); + logger.info("new object merged "); + mongoPrimaryInstance.printObject(sourcePathMetaCollection); + //applies the update + if(!(memoryType== MemoryType.VOLATILE)) + metaCollectionInstance.update(querySourcePath, sourcePathMetaCollection, false, true, Costants.DEFAULT_WRITE_TYPE); + else + metaCollectionInstance.update(querySourcePath, sourcePathMetaCollection, false, true); + logger.info("update metadata done "); + logger.info("check update "); + DBObject newDestPathMetaCollection= mongoPrimaryInstance.executeQuery(metaCollectionInstance, queryDestPath); + mongoPrimaryInstance.printObject(newDestPathMetaCollection); + }else if((destinationId!= null) && (destinationId.equals(sourceId))){ + logger.warn("the destination id and the source id are the same id. skip operation. 
"); + }else{ + queryDestPath = new BasicDBObject(); + queryDestPath.put( "dir" , destination ); + DBObject folder = metaCollectionInstance.findOne(queryDestPath);//= gfs.find(query); +// if the destination is an existing folder + if((folder != null)){ + destination=appendFileSeparator(destination); + sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination+name, destination, name, sourcePathMetaCollection); + destinationId=sourcePathMetaCollection.get("_id").toString(); + mongoPrimaryInstance.buildDirTree(metaCollectionInstance, destination); + + }else{ +// if the last char of dest path is a separator then the destination is a dir otherwise is a file +// then if it is a new folder + if(destination.lastIndexOf(Costants.FILE_SEPARATOR) == destination.length()-1){ + sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination+name, destination, name, sourcePathMetaCollection); + destinationId=sourcePathMetaCollection.get("_id").toString(); + mongoPrimaryInstance.buildDirTree(metaCollectionInstance, destination); + + }else{ + String newName=destination.substring(destination.lastIndexOf(Costants.FILE_SEPARATOR)+1); + sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination, dir, newName, sourcePathMetaCollection); + destinationId=sourcePathMetaCollection.get("_id").toString(); + mongoPrimaryInstance.buildDirTree(metaCollectionInstance, dir); + } + queryDestPath = new BasicDBObject(); + queryDestPath.put( "filename" , sourcePath); + //update common fields + updateQuery= new BasicDBObject(); + updateQuery.put("$set", sourcePathMetaCollection); + if(!(memoryType== MemoryType.VOLATILE)) + metaCollectionInstance.update(queryDestPath, updateQuery, true, true, Costants.DEFAULT_WRITE_TYPE); + else + metaCollectionInstance.update(queryDestPath, updateQuery, true, true); + } + } + mongoPrimaryInstance.close(); + return destinationId; + }else{ + mongoPrimaryInstance.close(); + throw new RemoteBackendException(" the source path is wrong. 
There isn't a file at this path: "+source); + } + }else{ + mongoPrimaryInstance.close(); + throw new IllegalArgumentException("parameters not completed, source: "+source+", destination: "+destination); + } + + } + + private BasicDBObject setCommonFields(BasicDBObject f, MyFile resource, OPERATION op) { + String owner=resource.getOwner(); + if(op == null){ + op=resource.getOperationDefinition().getOperation(); + } + logger.info("set last operation: "+op); + String from=null; + if(op.toString().equalsIgnoreCase(OPERATION.MOVE.toString())){ + from=resource.getLocalPath(); + } + String address=null; + try { + address=InetAddress.getLocalHost().getCanonicalHostName().toString(); + f.put("callerIP", address); + + } catch (UnknownHostException e) { } + if(from == null) + f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address); + else + f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address).append("from", from); + return f; + } + +} diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/SoftCopyOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/SoftCopyOperator.java new file mode 100644 index 0000000..971c338 --- /dev/null +++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/SoftCopyOperator.java @@ -0,0 +1,318 @@ +/** + * + */ +package org.gcube.contentmanagement.blobstorage.transport.backend.operation; + +import java.io.InputStream; +import java.net.UnknownHostException; + +import org.bson.types.ObjectId; +import org.gcube.contentmanagement.blobstorage.resource.MemoryType; +import org.gcube.contentmanagement.blobstorage.resource.MyFile; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION; +import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE; +import org.gcube.contentmanagement.blobstorage.service.operation.Monitor; +import org.gcube.contentmanagement.blobstorage.service.operation.Operation; +import org.gcube.contentmanagement.blobstorage.service.operation.SoftCopy; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager; +import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants; +import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBCollection; +import com.mongodb.DBObject; +import com.mongodb.gridfs.GridFSDBFile; + +/** + * @author Roberto Cirillo (ISTI-CNR) 2018 + * + */ +public class SoftCopyOperator extends SoftCopy { + + Logger logger=LoggerFactory.getLogger(SoftCopyOperator.class); + private MemoryType memoryType; + private MongoIOManager mongoPrimaryInstance; + private MyFile resource; + /** + * @param server + * @param user + * @param pwd + * @param bucket + * @param monitor + * @param isChunk + * @param backendType + * @param dbs + */ + public SoftCopyOperator(String[] server, String user, String pwd, String bucket, Monitor 
monitor, boolean isChunk, + String backendType, String[] dbs) { + super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs); + // TODO Auto-generated constructor stub + } + + @Override + public String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) + throws UnknownHostException { + REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource(); + LOCAL_RESOURCE localResourceIdentifier=resource.getOperation().getLocalResource(); + String source=null; + if(localResourceIdentifier.equals(LOCAL_RESOURCE.ID)) + source=resource.getId(); + else + source=sourcePath; + String destination=null; + if(remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID)) + destination=resource.getId(); + else + destination=destinationPath; + if(resource!=null){ + String dir=((MyFile)resource).getRemoteDir(); + String name=((MyFile)resource).getName(); + setMemoryType(((MyFile)resource).getGcubeMemoryType()); + } + setMongoPrimaryInstance(mongoPrimaryInstance); + ObjectId mapId=null; + GridFSDBFile destObject=null; + logger.debug("softCopy operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination); + if((source != null) && (!source.isEmpty())){ + GridFSDBFile sourceObject = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, true); + if(sourceObject != null){ +// GridFSDBFile originalObject=sourceObject; + // if it contains a link field, then I'm going to retrieve the related payload + sourceObject = mongoPrimaryInstance.retrieveLinkPayload(sourceObject); + ObjectId sourceId=(ObjectId)sourceObject.getId(); + InputStream is= sourceObject.getInputStream(); + resource.setInputStream(is); + GridFSDBFile dest = null; + if((destination == null) || (destination.isEmpty())){ + // if the destination param is null, the destination object will be filled with values extracted from sourceObject + if(sourceId==null) throw new RemoteBackendException("source object not found: "+source); + destination = fillGenericDestinationFields(resource, sourceId); + logger.warn("SoftCopy without destination parameter. The operation will be executed with the following destination path "+destination); + }else{ + // check if the destination is a dir or a file and if the destination exist + dest = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, false);//gfs.findOne(destination); + } + // check if the destination is a dir or a file and if the destination exist +// GridFSDBFile dest = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, false);//gfs.findOne(destination); +// GridFSInputFile destinationFile=mongoPrimaryInstance.createGFSFileObject(is, resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(is); + ObjectId removedId=null; + // if the destination location is not empty + if (dest != null){ + // remove the destination file. 
The third parameter set to true replaces the file; otherwise the remote id is returned + if(resource.isReplace()){ + removedId = mongoPrimaryInstance.removeFile(resource, null, resource.isReplace(), null, dest); + }else{ + return dest.getId().toString(); + } + } + // get metacollection instance + DBCollection metaCollectionInstance = getMetaCollection(); + String md5=sourceObject.getMD5(); + // check if the payload is already present on the backend + ObjectId md5Id=getDuplicatesMap(md5); + // check if the source object is already a map + if(isMap(sourceObject)){ + logger.debug("the sourceObject with the following id: "+sourceId+" is already a map"); + mapId=sourceId; + // then only the destObject needs to be added to the map + // first: create the link object in the destination place + DBObject newObject=createNewLinkObject(resource, sourceObject, destination, metaCollectionInstance, md5, mapId, removedId); + destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true); + // second: add the new object to the map + mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject); +// if the payload is already present on the backend + }else if(md5Id!=null){ + mapId=md5Id; + logger.debug("retrieved md5 on backend with the following id: "+mapId); + mapId = addToDuplicateMap(metaCollectionInstance, mapId, sourceObject); + DBObject newObject=createNewLinkObject(resource, sourceObject, destination, metaCollectionInstance, md5, mapId, removedId); + destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true); + mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject); + }else{ + // neither a map nor the md5 payload is present on the backend + mapId = createNewDuplicatesMap(metaCollectionInstance, resource, sourceObject, destination, sourceId); + mapId = addToDuplicateMap(metaCollectionInstance, mapId, sourceObject); + DBObject newObject=createNewLinkObject(resource, sourceObject, destination, metaCollectionInstance, md5, mapId, removedId); + destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true); + mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject); + } + if(logger.isDebugEnabled()) + logger.debug("mapId created/updated: "+mapId); + mongoPrimaryInstance.close(); + }else{ + mongoPrimaryInstance.close(); + throw new RemoteBackendException("the source path is wrong. There isn't a file at "+source); + } + }else throw new RemoteBackendException("Invalid arguments: source "+source+" destination "+destination); +// return mapId.toString(); + return destObject.getId().toString(); + } + + private String fillGenericDestinationFields(MyFile resource, ObjectId sourceId) { + String destination; + destination=resource.getRootPath()+sourceId; + resource.setName(sourceId.toString()); + resource.setRemoteDir(resource.getRootPath()); + return destination; + } + + /** + * + * @param metaCollectionInstance the metadata collection + * @param resource the resource being copied + * @param sourceObject the GridFS descriptor of the source + * @param bucket the destination path + * @param sourceId the id of the source object + * @return the id of the new map + * @throws UnknownHostException + */ + private ObjectId createNewDuplicatesMap(DBCollection metaCollectionInstance, Object resource, GridFSDBFile sourceObject, String bucket, ObjectId sourceId) throws UnknownHostException { + ObjectId id = null; + String dir= ((MyFile)resource).getRemoteDir(); + // create new dir (is it really needed in case of map object?) 
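// A sketch of the two document shapes behind softCopy: one payload-owning "map"
// document plus a "file" link per logical copy, tied together through
// Costants.LINK_IDENTIFIER and reference-counted in the "count" field. Field
// names are taken from this class; the example values are illustrative only.
DBObject exampleMap = new BasicDBObject("type", "map")
        .append("_id", new ObjectId())
        .append("count", 2);
DBObject exampleLink = new BasicDBObject("type", "file")
        .append("filename", "/softcopy/destination/path") // hypothetical path
        .append(Costants.LINK_IDENTIFIER, exampleMap.get("_id").toString());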
+ if((dir !=null && !dir.isEmpty()) && (bucket !=null && !bucket.isEmpty())){ + getMongoPrimaryInstance().buildDirTree(getMongoPrimaryInstance().getMetaDataCollection(null), dir); + } + // create new map object + id= createNewObjectMap(metaCollectionInstance, (MyFile)resource, sourceObject, sourceId); + return id; + } + + private ObjectId createNewObjectMap(DBCollection metaCollectionInstance, MyFile resource, GridFSDBFile source, ObjectId sourceId) throws UnknownHostException { + String md5=source.getMD5(); + // set type of object + DBObject document=new BasicDBObject("type", "map"); + // initialize count field to 0 + document.put("count", 0); + ObjectId id=new ObjectId(); + document.put("_id", id); + logger.debug("generated id for new map"+id); + document=fillCommonfields(document, resource, source, metaCollectionInstance, md5); + // update chunks collection + getMongoPrimaryInstance().updateChunksCollection(sourceId, id); + return id; +} + + private DBObject createNewLinkObject(MyFile resource, GridFSDBFile sourceObject, String destination, DBCollection metaCollectionInstance, String md5, ObjectId mapId, ObjectId newId){ + DBObject document=new BasicDBObject("type", "file"); + document.put("filename", destination); + document.put("name", resource.getName()); + document.put("dir", resource.getRemoteDir()); + document.put("owner", resource.getOwner()); + document.put(Costants.LINK_IDENTIFIER, mapId.toString()); + ObjectId id=null; + if(newId == null){ + id=new ObjectId(); + logger.debug("generated id for new object link"+id); + }else{ + id=newId; + logger.debug("restored id for new object link"+id); + } + document.put("_id", id); + + return fillCommonfields(document, resource, sourceObject, metaCollectionInstance, md5); + } + + private DBObject fillCommonfields(DBObject document, MyFile resource, GridFSDBFile sourceObject, DBCollection metaCollectionInstance, String md5) { + document.put("mimetype", ((MyFile)resource).getMimeType()); + document.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")); + document.put("md5", md5); + document.put("length", sourceObject.getLength()); + // set chunkSize inherited from original object + document.put("chunkSize", sourceObject.getChunkSize()); + metaCollectionInstance.insert(document); + metaCollectionInstance.save(document); + return document; + } + + private DBCollection getMetaCollection() throws UnknownHostException { + DBCollection metaCollectionInstance=null; + if(!(getMemoryType() == MemoryType.VOLATILE)) + metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true)); + else + metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), false)); + return metaCollectionInstance; + } + + private ObjectId addToDuplicateMap(DBCollection metaCollectionInstance, ObjectId mapId, GridFSDBFile f) throws UnknownHostException { + f.put(Costants.LINK_IDENTIFIER, mapId.toString()); + mongoPrimaryInstance.updateCommonFields(f, getResource(), OPERATION.SOFT_COPY); + f.save(); + incrementCountField(metaCollectionInstance, mapId); + return mapId; + } + + private void incrementCountField(DBCollection metaCollectionInstance, ObjectId mapId) throws UnknownHostException { + logger.info("increment count field on"+mapId+ " object map"); + BasicDBObject searchQuery= new BasicDBObject(); + searchQuery.put("_id" ,mapId); + DBObject 
+	private void incrementCountField(DBCollection metaCollectionInstance, ObjectId mapId) throws UnknownHostException {
+		logger.info("incrementing count field on map object "+mapId);
+		BasicDBObject searchQuery= new BasicDBObject();
+		searchQuery.put("_id", mapId);
+		DBObject mapObject=mongoPrimaryInstance.findCollectionObject(metaCollectionInstance, searchQuery);
+//		BasicDBObject updateObject= new BasicDBObject().append("$inc", new BasicDBObject().append("count", 1));
+		int count=(int)mapObject.get("count");
+		count++;
+		mapObject.put("count", count);
+//		metaCollectionInstance.update(mapObject, updateObject);
+		metaCollectionInstance.save(mapObject);
+	}
+
+	private ObjectId getDuplicatesMap(String md5){
+		ObjectId id= checkMd5(md5);
+		return id;
+	}
+
+	/**
+	 * @param sourceObject the remote descriptor to inspect
+	 * @return true if the object is a duplicates map
+	 */
+	private boolean isMap(GridFSDBFile sourceObject) {
+		Object type=sourceObject.get("type");
+		// null-safe: objects without a type field are not maps
+		return type != null && "map".equals(type.toString());
+	}
+
+	/**
+	 * Check if the backend already has the payload
+	 * @param md5 string of the file
+	 * @return the ObjectId of the md5 file found on the backend, else null
+	 */
+	private ObjectId checkMd5(String md5) {
+		// TODO not implemented yet: it always returns null, so md5-based
+		// deduplication currently never finds an existing payload
+		return null;
+	}
+
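+	// Editor's sketch (hypothetical, not part of the original patch): one plausible way to
+	// complete the checkMd5 stub above, querying the meta collection for a document carrying
+	// the same digest; the "md5" field name follows fillCommonfields, the rest is assumption.
+	private ObjectId findPayloadByMd5(DBCollection metaCollectionInstance, String md5) {
+		DBObject found = metaCollectionInstance.findOne(new BasicDBObject("md5", md5));
+		return (found == null) ? null : (ObjectId) found.get("_id");
+	}
+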
+	public MemoryType getMemoryType() {
+		return memoryType;
+	}
+
+	public void setMemoryType(MemoryType memoryType) {
+		this.memoryType = memoryType;
+	}
+
+	public MongoIOManager getMongoPrimaryInstance() {
+		return mongoPrimaryInstance;
+	}
+
+	public void setMongoPrimaryInstance(MongoIOManager mongoPrimaryInstance) {
+		this.mongoPrimaryInstance = mongoPrimaryInstance;
+	}
+
+	public MyFile getResource() {
+		return resource;
+	}
+
+	public void setResource(MyFile resource) {
+		this.resource = resource;
+	}
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UnlockOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UnlockOperator.java
new file mode 100644
index 0000000..428c618
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UnlockOperator.java
@@ -0,0 +1,101 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
+
+import java.io.FileNotFoundException;
+
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
+import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
+import org.gcube.contentmanagement.blobstorage.service.operation.Unlock;
+import org.gcube.contentmanagement.blobstorage.service.operation.Upload;
+import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.mongodb.gridfs.GridFSDBFile;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public class UnlockOperator extends Unlock {
+
+	Logger logger= LoggerFactory.getLogger(UnlockOperator.class);
+
+	/**
+	 * @param server
+	 * @param user
+	 * @param pwd
+	 * @param bucket
+	 * @param monitor
+	 * @param isChunk
+	 * @param backendType
+	 * @param dbs
+	 */
+	public UnlockOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
+			String backendType, String[] dbs) {
+		super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
+		// configuration is delegated to the Unlock base class
+	}
+
+	/* (non-Javadoc)
+	 * @see org.gcube.contentmanagement.blobstorage.service.operation.Unlock#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
+	 */
+	@Override
+	public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, String key4unlock) throws Exception {
+		String id=null;
+		OperationDefinition op=resource.getOperationDefinition();
+		REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
+		logger.info("MongoClient unlock method: "+op.toString());
+//		if(((resource.getLocalPath() !=null) && (!resource.getLocalPath().isEmpty()))){
+//			resource.setOperation(OPERATION.UPLOAD);
+//			Upload upload= new UploadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
+//			setUpload(upload);
+//			id=put(getUpload(), getResource(), isChunk(), false, false, true);
+//			mongoPrimaryInstance.close();
+//			resource.setOperation(op);
+//		}
+		String dir=resource.getRemoteDir();
+		String name=resource.getName();
+		String path=getBucket();
+		if(logger.isDebugEnabled())
+			logger.debug("DIR: "+dir+" name: "+name+" fullPath: "+path+" bucket: "+bucket);
+		GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(path, remoteResourceIdentifier, true);
+		if(f != null){
+			String oldir=(String)f.get("dir");
+			if(logger.isDebugEnabled())
+				logger.debug("old dir found: "+oldir);
+			if((oldir.equalsIgnoreCase(resource.getRemoteDir())) || resource.getRemoteDir()==null){
+				String lock=(String)f.get("lock");
+				// check if the old file is locked
+				if((lock !=null) && (!lock.isEmpty())){
+					if(lock.equalsIgnoreCase(key4unlock)){
+						f.put("lock", null);
+						f.put("timestamp", null);
+						mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.UNLOCK);
+						f.save();
+					}else{
+						mongoPrimaryInstance.close();
+						throw new IllegalAccessError("bad key for unlock");
+					}
+				}else{
+					mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.UNLOCK);
+					f.save();
+				}
+			}else{
+				mongoPrimaryInstance.close();
+				throw new FileNotFoundException(path);
+			}
+		}else{
+			mongoPrimaryInstance.close();
+			throw new FileNotFoundException(path);
+		}
+		// id stays null unless the commented-out upload step above is re-enabled
+		return id;
+	}
+
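+	// Editor's sketch (hypothetical, not part of the original patch): the key comparison
+	// performed by execute above, in isolation: a null or empty lock means the file is not
+	// locked, so the unlock always succeeds; otherwise the caller's key must match.
+	private static boolean keyMatches(String storedLock, String key4unlock) {
+		if (storedLock == null || storedLock.isEmpty())
+			return true; // nothing to unlock
+		return storedLock.equalsIgnoreCase(key4unlock);
+	}
+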
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UploadOperator.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UploadOperator.java
new file mode 100644
index 0000000..324fce7
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/operation/UploadOperator.java
@@ -0,0 +1,112 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
+
+import java.io.IOException;
+import org.bson.types.ObjectId;
+import org.gcube.contentmanagement.blobstorage.resource.MyFile;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
+import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
+import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
+import org.gcube.contentmanagement.blobstorage.service.operation.Upload;
+import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.mongodb.gridfs.GridFSDBFile;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public class UploadOperator extends Upload {
+
+	Logger logger= LoggerFactory.getLogger(UploadOperator.class);
+
+	/**
+	 * @param server
+	 * @param user
+	 * @param pwd
+	 * @param bucket
+	 * @param monitor
+	 * @param isChunk
+	 * @param bck
+	 * @param dbs
+	 */
+	public UploadOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
+			String bck, String[] dbs) {
+		super(server, user, pwd, bucket, monitor, isChunk, bck, dbs);
+		// configuration is delegated to the Upload base class
+	}
+
+	/* (non-Javadoc)
+	 * @see org.gcube.contentmanagement.blobstorage.service.operation.Upload#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
+	 */
+	@Override
+	public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, boolean replace) throws IOException {
+		OperationDefinition op=resource.getOperationDefinition();
+		REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
+		logger.info("MongoClient put method: "+op.toString());
+		String dir=resource.getRemoteDir();
+		String name=resource.getName();
+		Object id=null;
+		ObjectId oldId=null;
+		// descriptor of the remote file, if present
+		GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, remoteResourceIdentifier, false);
+		if(fold != null){
+			// a file is already present
+			logger.info("a file is already present at: "+getBucket());
+			// keep the old id
+			oldId=(ObjectId) fold.getId();
+			logger.info("got old id: "+oldId);
+			// remove the old file
+			oldId = mongoPrimaryInstance.removeFile(resource, bucket, replace, oldId, fold);
+			// ADDED 03112015
+			if(!isReplaceOption()){
+				return oldId.toString();
+			}
+			// END ADDED
+		}
+		// create new file
+		logger.info("create new file "+bucket);
+		if((remoteResourceIdentifier != null) && (remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID)) && (ObjectId.isValid(getBucket()))){
+			id = mongoPrimaryInstance.createNewFile(resource, null, dir, name, new ObjectId(getBucket()));
+		}else{
+			id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir, name, oldId);
+		}
+		return id.toString();
+	}
+
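+	// Editor's sketch (hypothetical, not part of the original patch): the replace semantics of
+	// execute above: when a descriptor already exists and replace is false, the existing id is
+	// returned untouched; otherwise the old file is removed and a new one is created.
+	private static String chooseResultId(ObjectId oldId, String newId, boolean replace) {
+		return (oldId != null && !replace) ? oldId.toString() : newId;
+	}
+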
+	public String executeSafeMode(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException {
+		OperationDefinition op=resource.getOperationDefinition();
+		REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
+		logger.info("MongoClient put method: "+op.toString());
+		String dir=resource.getRemoteDir();
+		String name=resource.getName();
+		ObjectId id=null;
+		ObjectId oldId=null;
+		// descriptor of the remote file, if present
+		GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, false);
+		if(fold != null){
+			// a file is already present
+			logger.info("a file is already present at: "+getBucket());
+			// keep the old id
+			oldId=(ObjectId) fold.getId();
+			logger.info("got old id: "+oldId);
+			// create the new file first
+			id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir, name, null);
+			// then remove the old file
+			oldId = mongoPrimaryInstance.removeFile(resource, getBucket(), isReplaceOption(), oldId, fold);
+//			oldId = removeOldMetadataFile(oldId);
+			// update the id to the new file
+			id=mongoPrimaryInstance.updateId(id, oldId);
+		}else{
+			// create new file
+			id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir, name, oldId);
+		}
+		return id.toString();
+	}
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Costants.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Costants.java
new file mode 100644
index 0000000..b779275
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Costants.java
@@ -0,0 +1,88 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public final class Costants {
+
+	public static final String NO_SSL_VARIABLE_NAME="NO-SSL";
+	// allowed values are: "NO-SSL", "SSL"
+	public static final String DEFAULT_CONNECTION_MODE="NO-SSL";
+	public static final int CONNECTION_PER_HOST=30;
+	// milliseconds
+	public static final int CONNECT_TIMEOUT=30000;
+	/** Report type - used by the ReportFactory class */
+	public static final int ACCOUNTING_TYPE = 1;
+	// used by MyFile class
+	public static final boolean DEFAULT_REPLACE_OPTION=false;
+	// used by BucketCoding class and operation package
+	public static final String SEPARATOR="_-_";
+	// used by Encrypter class
+	public static final String DESEDE_ENCRYPTION_SCHEME = "DESede";
+	// used by Encrypter class
+	public static final String DES_ENCRYPTION_SCHEME = "DES";
+	// used by ServiceEngine class
+	public static final String FILE_SEPARATOR = "/";
+	public static final int CONNECTION_RETRY_THRESHOLD=5;
+	public static final String DEFAULT_SCOPE = "private";
+	public static final long TTL=180000;
+	public static final boolean DEFAULT_CHUNK_OPTION=false;
+	public static final int TTL_RENEW = 5;
+	public static final String DEFAULT_RESOLVER_HOST= "data.d4science.org";
+
+	// used by Operation class
+	public static final String COUNT_IDENTIFIER="count";
+	public static final String LINK_IDENTIFIER="link";
+
+	// used by MongoIOManager class
+	public static final String DEFAULT_META_COLLECTION="fs.files";
+	public static final String DEFAULT_DB_NAME="remotefs";
+	public static final String ROOT_PATH_PATCH_V1=Costants.FILE_SEPARATOR+"home"+Costants.FILE_SEPARATOR+"null"+Costants.FILE_SEPARATOR;
+	public static final String ROOT_PATH_PATCH_V2=Costants.FILE_SEPARATOR+"public"+Costants.FILE_SEPARATOR;
+	public static final String DEFAULT_CHUNKS_COLLECTION = "fs.chunks";
+//	public static final WriteConcern DEFAULT_WRITE_TYPE=WriteConcern.NORMAL;
+	public static final WriteConcern DEFAULT_WRITE_TYPE=WriteConcern.REPLICA_ACKNOWLEDGED;
+	public static final ReadPreference DEFAULT_READ_PREFERENCE=ReadPreference.primaryPreferred();
+//	public static final boolean DEFAULT_READWRITE_PREFERENCE= false;
+	public static final boolean DEFAULT_READWRITE_PREFERENCE= true;
+
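+	/*
+	 * Editor's note (hypothetical usage, not part of the original patch): these connection
+	 * defaults are the values a MongoClient would typically be configured with, e.g.:
+	 *
+	 *   MongoClientOptions opts = MongoClientOptions.builder()
+	 *       .connectionsPerHost(Costants.CONNECTION_PER_HOST)  // 30 sockets per host
+	 *       .connectTimeout(Costants.CONNECT_TIMEOUT)          // 30 seconds
+	 *       .writeConcern(Costants.DEFAULT_WRITE_TYPE)         // replica-acknowledged writes
+	 *       .readPreference(Costants.DEFAULT_READ_PREFERENCE)  // primaryPreferred reads
+	 *       .build();
+	 */
+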
+	// used by GetHttpsUrl class
+	public static final String URL_SEPARATOR="/";
+	public static final String VOLATILE_URL_IDENTIFICATOR = "-VLT";
+
+	// used by OperationManager class
+	// CONSTANT FOR THE CLIENT FACTORY
+	public static final String CLIENT_TYPE="mongo";
+	// CONSTANTS FOR THREAD MANAGEMENT (not used by mongodb)
+	public static final int MIN_THREAD=1;
+	public static final int MAX_THREAD=10;
+	// CONSTANTS FOR CHUNK MANAGEMENT (not used by mongodb)
+	// the "soglia" identifiers below are Italian: maximum/minimum chunk count and size thresholds
+	public static final int sogliaNumeroMassimo=400;
+	public static final int sogliaNumeroMinimo=4;
+	// size is expressed in bytes
+	public static final int sogliaDimensioneMinima=1024*1024;
+	// size is expressed in bytes
+	public static final int sogliaDimensioneMassima= 4*1024*1024;
+
+	// used by DuplicateOperator class
+	public static final String DUPLICATE_SUFFIX="-dpl";
+
+	// declared for GetPayloadMap, currently unused
+	public static final String MAP_FIELD="";
+
+	// used by TransportManager class
+	public static final String DEFAULT_TRANSPORT_MANAGER="MongoDB";
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/DateUtils.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/DateUtils.java
new file mode 100644
index 0000000..fd6df34
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/DateUtils.java
@@ -0,0 +1,26 @@
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import java.util.Calendar;
+import java.text.SimpleDateFormat;
+
+/**
+ * Formats the current date
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public class DateUtils {
+
+	/**
+	 * usage: DateUtils.now("dd MMMMM yyyy")
+	 *
+	 * @param dateFormat the date pattern, e.g. "dd MMMMM yyyy"
+	 * @return the current date formatted with the given pattern
+	 */
+	public static String now(String dateFormat) {
+		Calendar cal = Calendar.getInstance();
+		SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
+		return sdf.format(cal.getTime());
+	}
+
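+	// Editor's sketch (hypothetical, not part of the original patch): the exact pattern used
+	// elsewhere in this patch (see fillCommonfields in SoftCopyOperator) to stamp creationTime.
+	public static String nowForMetadata() {
+		return now("dd MM yyyy 'at' hh:mm:ss z"); // e.g. "02 10 2018 at 10:05:43 GMT"
+	}
+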
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetMD5.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetMD5.java
new file mode 100644
index 0000000..9e03c7c
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetMD5.java
@@ -0,0 +1,51 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.IOUtils;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public class GetMD5 {
+
+	private File file;
+
+	public GetMD5(String filePath) {
+		this.file = new File(filePath);
+	}
+
+	public GetMD5(File file) {
+		this.file = file;
+	}
+
+	public String getMD5() {
+		String md5 = null;
+		// md5Hex converts an array of bytes into an array of characters representing the
+		// hexadecimal values of each byte in order. The returned array is double the length
+		// of the passed array, as it takes two characters to represent any given byte.
+		// try-with-resources closes the stream even when reading fails
+		try (FileInputStream fileInputStream = new FileInputStream(this.file)) {
+			md5 = DigestUtils.md5Hex(IOUtils.toByteArray(fileInputStream));
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+		return md5;
+	}
+
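+	// Editor's sketch (hypothetical alternative, not part of the original patch): commons-codec
+	// can also digest the stream directly, avoiding loading the whole file into memory through
+	// IOUtils.toByteArray; the result is identical to getMD5 above.
+	public String getMD5Streaming() throws IOException {
+		try (FileInputStream in = new FileInputStream(this.file)) {
+			return DigestUtils.md5Hex(in); // reads and digests incrementally
+		}
+	}
+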
+}
\ No newline at end of file
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetPayloadMap.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetPayloadMap.java
new file mode 100644
index 0000000..ad5a608
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/GetPayloadMap.java
@@ -0,0 +1,25 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import org.bson.types.ObjectId;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public class GetPayloadMap {
+
+	private ObjectId mapId;
+
+	public GetPayloadMap(ObjectId id){
+		mapId=id;
+	}
+
+	public ObjectId getMap(){
+		// the map id supplied at construction time
+		return mapId;
+	}
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoInputStream.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoInputStream.java
new file mode 100644
index 0000000..fa86be1
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoInputStream.java
@@ -0,0 +1,103 @@
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import java.io.IOException;
+import java.io.InputStream;
+import org.apache.commons.io.input.ProxyInputStream;
+import com.mongodb.MongoClient;
+
+/**
+ * Wraps an input stream and closes the mongo connection when the stream
+ * is exhausted or closed
+ * @author Roberto Cirillo (ISTI - CNR)
+ *
+ */
+public class MongoInputStream extends ProxyInputStream{
+
+	private MongoClient mongo;
+	private boolean closed;
+
+	public MongoInputStream(MongoClient mongo, InputStream proxy) {
+		super(proxy);
+		this.mongo=mongo;
+		// the connection is released by close(), once the stream is consumed
+	}
+
+	@Override
+	public void close(){
+		if(!isClosed()){
+			try {
+				super.close();
+			} catch (IOException e) {
+				e.printStackTrace();
+			}
+			if (mongo!=null)
+				mongo.close();
+			setClosed(true);
+		}
+	}
+
+	public boolean isClosed() {
+		return closed;
+	}
+
+	public void setClosed(boolean closed) {
+		this.closed = closed;
+	}
+
+	public int read() throws IOException {
+		int n = in.read();
+		if (n == -1) {
+			close();
+		}
+		return n;
+	}
+
+	/**
+	 * Reads bytes from the underlying input stream into the given
+	 * buffer. If the underlying stream returns -1, the {@link #close()} method
+	 * is called to automatically close and discard the stream.
+	 *
+	 * @param b buffer to which bytes from the stream are written
+	 * @return number of bytes read, or -1 if no more bytes are available
+	 * @throws IOException if the stream could not be read or closed
+	 */
+	public int read(byte[] b) throws IOException {
+		int n = in.read(b);
+		if (n == -1) {
+			close();
+		}
+		return n;
+	}
+
+	/**
+	 * Reads bytes from the underlying input stream into the given
+	 * buffer. If the underlying stream returns -1, the {@link #close()} method
+	 * is called to automatically close and discard the stream.
+	 *
+	 * @param b buffer to which bytes from the stream are written
+	 * @param off start offset within the buffer
+	 * @param len maximum number of bytes to read
+	 * @return number of bytes read, or -1 if no more bytes are available
+	 * @throws IOException if the stream could not be read or closed
+	 */
+	public int read(byte[] b, int off, int len) throws IOException {
+		int n = in.read(b, off, len);
+		if (n == -1) {
+			close();
+		}
+		return n;
+	}
+
+	/**
+	 * Ensures that the stream is closed before it gets garbage-collected.
+	 * As mentioned in {@link #close()}, this is a no-op if the stream has
+	 * already been closed.
+	 * @throws Throwable if an error occurs
+	 */
+	protected void finalize() throws Throwable {
+		close();
+		super.finalize();
+	}
+}
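+// Editor's note (hypothetical usage, not part of the original patch): a download path would
+// wrap the GridFS stream so that exhausting or closing it also releases the client connection:
+//
+//   InputStream in = new MongoInputStream(mongoClient, gridFSDBFile.getInputStream());
+//   // reading to EOF (or calling close()) closes both the stream and the MongoClient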
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoOutputStream.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoOutputStream.java
new file mode 100644
index 0000000..ada9466
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/MongoOutputStream.java
@@ -0,0 +1,82 @@
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import org.apache.commons.io.output.ProxyOutputStream;
+import com.mongodb.MongoClient;
+
+/**
+ * Wraps an output stream and closes the mongo connection when the stream is closed
+ */
+public class MongoOutputStream extends ProxyOutputStream {
+
+	private MongoClient mongo;
+	private boolean closed;
+
+	public MongoOutputStream(MongoClient mongo, OutputStream proxy) {
+		super(proxy);
+		this.mongo=mongo;
+		// the connection is released by close()
+	}
+
+	/**
+	 * Invokes the delegate's write(int) method.
+	 * @param idx the byte to write
+	 * @throws IOException if an I/O error occurs
+	 */
+	public void write(int idx) throws IOException {
+		out.write(idx);
+	}
+
+	/**
+	 * Invokes the delegate's write(byte[]) method.
+	 * @param bts the bytes to write
+	 * @throws IOException if an I/O error occurs
+	 */
+	public void write(byte[] bts) throws IOException {
+		out.write(bts);
+	}
+
+	/**
+	 * Invokes the delegate's write(byte[], int, int) method.
+	 * @param bts the bytes to write
+	 * @param st the start offset
+	 * @param end the number of bytes to write
+	 * @throws IOException if an I/O error occurs
+	 */
+	public void write(byte[] bts, int st, int end) throws IOException {
+		out.write(bts, st, end);
+	}
+
+	/**
+	 * Invokes the delegate's flush() method.
+	 * @throws IOException if an I/O error occurs
+	 */
+	public void flush() throws IOException {
+		out.flush();
+	}
+
+	/**
+	 * Invokes the delegate's close() method, then closes the mongo connection.
+	 * @throws IOException if an I/O error occurs
+	 */
+	public void close() throws IOException {
+		if(!isClosed()){
+			try {
+				super.close();
+			} catch (IOException e) {
+				e.printStackTrace();
+			}
+			// null-safe, like MongoInputStream
+			if (mongo!=null)
+				mongo.close();
+			setClosed(true);
+		}
+	}
+
+	public void setClosed(boolean closed) {
+		this.closed = closed;
+	}
+
+	public boolean isClosed() {
+		return closed;
+	}
+
+}
diff --git a/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Utils.java b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Utils.java
new file mode 100644
index 0000000..3641e17
--- /dev/null
+++ b/src/main/java/org/gcube/contentmanagement/blobstorage/transport/backend/util/Utils.java
@@ -0,0 +1,48 @@
+/**
+ *
+ */
+package org.gcube.contentmanagement.blobstorage.transport.backend.util;
+
+import java.util.Map;
+import java.util.TreeSet;
+
+/**
+ * @author Roberto Cirillo (ISTI-CNR) 2018
+ *
+ */
+public class Utils {
+
+	/**
+	 * Case-insensitive lookup of an environment variable.
+	 * @param name the variable name, matched ignoring case
+	 * @return the value of the matching variable, or null if none is defined
+	 */
+	public static String checkVarEnv(String name){
+		Map<String, String> env = System.getenv();
+		for (String key : new TreeSet<String>(env.keySet())) {
+			if (key.equalsIgnoreCase(name)) {
+				return env.get(key);
+			}
+		}
+		return null;
+	}
+
+	public static boolean isVarEnv(String name){
+		return checkVarEnv(name) != null;
+	}
+
+}
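+// Editor's note (hypothetical usage, not part of the original patch): the lookup is
+// case-insensitive, so all of these resolve to the same environment variable:
+//
+//   String host = Utils.checkVarEnv("MONGO_HOST");  // exact case
+//   String same = Utils.checkVarEnv("mongo_host");  // case-insensitive match
+//   boolean set = Utils.isVarEnv("Mongo_Host");     // true when the variable is defined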