Solved an issue in the Archive upload functionality

Lucio Lelii 2024-11-22 11:28:38 +01:00
parent 47a4564ffa
commit 34e7820514
5 changed files with 32 additions and 38 deletions

CHANGELOG.md

@@ -3,7 +3,7 @@
 All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [v2.0.0]
+## [v2.0.0-SNAPSHOT]
 
 - ceph as default storage
 - vre folders can define specific bucket as backend

pom.xml

@@ -10,7 +10,7 @@
 	<modelVersion>4.0.0</modelVersion>
 	<groupId>org.gcube.data.access</groupId>
 	<artifactId>storagehub</artifactId>
-	<version>2.0.0</version>
+	<version>2.0.0-SNAPSHOT</version>
 	<name>storagehub</name>
 	<scm>
 		<connection>
@@ -42,7 +42,7 @@
 			<dependency>
 				<groupId>org.gcube.distribution</groupId>
 				<artifactId>gcube-smartgears-bom</artifactId>
-				<version>4.0.0</version>
+				<version>4.0.1-SNAPSHOT</version>
 				<type>pom</type>
 				<scope>import</scope>
 			</dependency>

src/main/java/org/gcube/data/access/storagehub/handlers/items/ItemHandler.java

@@ -1,6 +1,7 @@
 package org.gcube.data.access.storagehub.handlers.items;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -102,27 +103,15 @@ public class ItemHandler {
 		authChecker.checkWriteAuthorizationControl(ses, parameters.getUser(), destination.getIdentifier(), true);
 		try {
-			Node newNode = null;
-			switch (parameters.getMangedType()) {
-			case FILE:
-				newNode = create((FileCreationParameters) parameters, destination);
-				break;
-			case FOLDER:
-				newNode = create((FolderCreationParameters) parameters, destination);
-				break;
-			case ARCHIVE:
-				newNode = create((ArchiveStructureCreationParameter) parameters, destination);
-				break;
-			case URL:
-				newNode = create((URLCreationParameters) parameters, destination);
-				break;
-			case GCUBEITEM:
-				newNode = create((GCubeItemCreationParameters) parameters, destination);
-				break;
-			default:
-				throw new InvalidCallParameters("Item not supported");
-			}
-			log.debug("item with id {} correctly created", newNode.getIdentifier());
+			Node newNode = switch (parameters.getMangedType()) {
+				case FILE -> create((FileCreationParameters) parameters, destination);
+				case FOLDER -> create((FolderCreationParameters) parameters, destination);
+				case ARCHIVE -> create((ArchiveStructureCreationParameter) parameters, destination);
+				case URL -> create((URLCreationParameters) parameters, destination);
+				case GCUBEITEM -> create((GCubeItemCreationParameters) parameters, destination);
+				default -> throw new InvalidCallParameters("Item not supported");
+			};
+			log.debug("item with id {} correctly created", newNode.getIdentifier());
 			return newNode.getIdentifier();
 		} finally {
 			if (parameters.getSession().getWorkspace().getLockManager().isLocked(destination.getPath()))
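
The hunk above rewrites a statement switch, which needed a mutable local and a break per case, as a Java 14+ switch expression. A minimal self-contained sketch of the construct, using an illustrative enum rather than the project's real parameter classes:

public class SwitchExprDemo {

	enum ManagedType { FILE, FOLDER, ARCHIVE, URL, GCUBEITEM }

	static String describe(ManagedType type) {
		// arrow labels never fall through, and the expression must cover
		// every constant, so a missing case fails at compile time
		return switch (type) {
			case FILE -> "single file upload";
			case FOLDER -> "folder creation";
			case ARCHIVE -> "archive extraction and upload";
			case URL -> "URL item";
			case GCUBEITEM -> "gCube item import";
		};
	}

	public static void main(String[] args) {
		System.out.println(describe(ManagedType.ARCHIVE));
	}
}

With every constant covered the expression is exhaustive and needs no default branch; the project code keeps one only as a guard against unsupported values.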
@@ -182,7 +171,7 @@ public class ItemHandler {
 			if (entry.isDirectory()) {
 				log.debug("creating directory with entire path {} ", entirePath);
 				createPath(entirePath, directoryNodeMap, parentDirectoryNode, params.getSession(), params.getUser());
-			} else {
+			} else
 				try {
 					String name = entirePath.replaceAll("([^/]*/)*(.*)", "$2");
 					String parentPath = entirePath.replaceAll("(([^/]*/)*)(.*)", "$1");
@@ -192,22 +181,27 @@ public class ItemHandler {
 					long fileSize = entry.getSize();
 					FormDataContentDisposition fileDetail = FormDataContentDisposition.name(name).size(fileSize)
 							.build();
+					// this code has been added because of a bug in the S3 client (v1) that closes the stream
+					InputStream notClosableIS = new BufferedInputStream(input) {
+						@Override
+						public void close() throws IOException { }
+					};
 					if (parentPath.isEmpty()) {
-						fileNode = createFileItemInternally(params.getSession(), parentDirectoryNode, input, name, "",
+						fileNode = createFileItemInternally(params.getSession(), parentDirectoryNode, notClosableIS, name, "",
 								fileDetail, params.getUser(), false);
 					} else {
 						Node parentNode = directoryNodeMap.get(parentPath);
 						if (parentNode == null)
 							parentNode = createPath(parentPath, directoryNodeMap, parentDirectoryNode,
 									params.getSession(), params.getUser());
-						fileNode = createFileItemInternally(params.getSession(), parentNode, input, name, "",
+						fileNode = createFileItemInternally(params.getSession(), parentNode, notClosableIS, name, "",
 								fileDetail, params.getUser(), false);
 					}
 					fileNodes.add(fileNode);
 				} catch (Throwable e) {
 					log.warn("error getting file {}", entry.getName(), e);
 				}
-			}
 		}
 		log.info("archive {} uploading finished ", params.getParentFolderName());
@@ -217,6 +211,8 @@ public class ItemHandler {
 		return parentDirectoryNode;
 	}
 
 	private Node createPath(String parentPath, Map<String, Node> directoryNodeMap, Node rootNode, Session ses,
 			String user) throws StorageHubException, RepositoryException {
 		String[] parentPathSplit = parentPath.split("/");
@@ -346,8 +342,7 @@ public class ItemHandler {
 			throws BackendGenericError {
 		log.trace("UPLOAD: filling content");
 		ContentHandler handler = getContentHandler(stream, storageBackend, name, fileDetails, relPath, login);
-		AbstractFileItem item = handler.buildItem(name, description, login);
-		return item;
+		return handler.buildItem(name, description, login);
 	}
 
 	private ContentHandler getContentHandler(InputStream stream, StorageBackend storageBackend, String name,

S3Backend.java

@@ -183,8 +183,7 @@ public class S3Backend extends StorageBackend{
 	@Override
 	public InputStream download(String id) throws StorageIdNotFoundException{
 		try {
-			InputStream inputStream = client.getObject(bucketName, id).getObjectContent();
-			return inputStream;
+			return client.getObject(bucketName, id).getObjectContent();
 		}catch (Exception e) {
 			log.error("error downloading file from s3");
 			throw new RuntimeException("error downloading file from s3",e);
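
The simplification above hands the S3 object content stream straight to the caller. With the AWS SDK v1 that stream wraps a pooled HTTP connection, so whoever consumes it must close it or the connection pool is slowly exhausted. A consumption sketch under that assumption (not storagehub code):

import java.io.IOException;
import java.io.InputStream;

import com.amazonaws.services.s3.AmazonS3;

public class S3DownloadSketch {

	public static long countBytes(AmazonS3 client, String bucket, String key) throws IOException {
		// closing the content stream releases the pooled HTTP connection
		try (InputStream in = client.getObject(bucket, key).getObjectContent()) {
			byte[] buffer = new byte[8192];
			long total = 0;
			for (int read; (read = in.read(buffer)) != -1;)
				total += read;
			return total;
		}
	}
}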

JCR node type definitions (.cnd file)

@@ -114,7 +114,7 @@
 	= nthl:payloadBackend
 	mandatory autocreated
-	+ *
+	+ * (nthl:workspaceItem)
 
 [nthl:workspaceSharedItem] > nthl:workspaceItem, mix:shareable
 	- hl:privilege (String)
@@ -212,7 +212,7 @@
 [nthl:externalPdf] > nthl:workspaceLeafItem
 
-nthl:externalLink > nthl:workspaceItem
+[nthl:externalLink] > nthl:workspaceItem
 	- hl:value
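
The two CND hunks tighten the node type definitions: the unconstrained child node definition (+ *) under nthl:payloadBackend now requires children of type nthl:workspaceItem, and the nthl:externalLink declaration gains the square brackets the CND grammar requires around a node type name. Changed definitions only take effect once re-registered with the repository; a sketch using Apache Jackrabbit's CndImporter, assuming that is the stack in use and with an illustrative resource name:

import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;

import javax.jcr.Session;

import org.apache.jackrabbit.commons.cnd.CndImporter;

public class CndRegistrationSketch {

	public static void registerNodeTypes(Session session) throws Exception {
		// "/nodetypes.cnd" is a hypothetical classpath resource name
		try (Reader cnd = new InputStreamReader(
				CndRegistrationSketch.class.getResourceAsStream("/nodetypes.cnd"),
				StandardCharsets.UTF_8)) {
			// reuseExisting = true re-registers definitions that already exist,
			// which is what a fix to an already deployed CND file needs
			CndImporter.registerNodeTypes(cnd, session, true);
		}
	}
}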