forked from D-Net/dnet-hadoop
Added the option to do a new deposition or a new version of an old deposition
This commit is contained in:
parent 1b3ad1bce6
commit 3aedfdf0d6
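In short, the dump upload step now drives Zenodo through the new ZenodoAPIClient and supports two modes: opening a brand-new deposition, or adding a new version to an existing deposition identified by its concept record id. Below is a minimal, hedged sketch of those two modes using only the client calls visible in the diff that follows (newDeposition(), newVersion(...), MissingConceptDoiException); the connection URL, access token and the argument handling are placeholders, not part of this commit.

// Sketch only: the ZenodoAPIClient calls are the ones introduced by this commit;
// the URL, token and argument parsing are placeholder assumptions.
import eu.dnetlib.dhp.common.api.MissingConceptDoiException;
import eu.dnetlib.dhp.common.api.ZenodoAPIClient;

public class ZenodoDepositionModes {

	public static void main(String[] args) throws Exception, MissingConceptDoiException {
		ZenodoAPIClient client = new ZenodoAPIClient(
			"https://sandbox.zenodo.org/api/deposit/depositions", // placeholder connection URL
			"ACCESS_TOKEN"); // placeholder access token

		boolean newDeposition = args.length > 0 && Boolean.parseBoolean(args[0]);
		String conceptRecordId = args.length > 1 ? args[1] : null;

		if (newDeposition) {
			// first upload of the dump: open a brand-new deposition
			client.newDeposition();
		} else {
			// subsequent uploads: a concept record id is mandatory
			if (conceptRecordId == null) {
				throw new MissingConceptDoiException("No concept record id has been provided");
			}
			client.newVersion(conceptRecordId);
		}
	}
}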
@@ -1,23 +1,26 @@
 package eu.dnetlib.dhp.oa.graph.dump;
 
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.oa.graph.dump.community.CommunityMap;
-import eu.dnetlib.dhp.utils.ISLookupClientFactory;
+import java.io.Serializable;
+import java.util.Optional;
+
+import eu.dnetlib.dhp.common.api.ZenodoAPIClient;
+import eu.dnetlib.dhp.common.api.MissingConceptDoiException;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 
-import java.io.File;
-import java.io.Serializable;
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.oa.graph.dump.community.CommunityMap;
+import eu.dnetlib.dhp.utils.ISLookupClientFactory;
 
 public class SendToZenodoHDFS implements Serializable {
 
 	private static final Log log = LogFactory.getLog(SendToZenodoHDFS.class);
 
-	public static void main(final String[] args) throws Exception {
+	public static void main(final String[] args) throws Exception, MissingConceptDoiException {
 		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
 			IOUtils
 				.toString(
@@ -33,6 +36,9 @@ public class SendToZenodoHDFS implements Serializable {
 		final String connection_url = parser.get("connectionUrl");
 		final String metadata = parser.get("metadata");
 		final String isLookUpUrl = parser.get("isLookUpUrl");
+		final Boolean newDeposition = Boolean.valueOf(parser.get("newDeposition"));
+		final String concept_rec_id = Optional.ofNullable(parser.get("conceptRecordId"))
+			.orElse(null);
 
 		QueryInformationSystem qis = new QueryInformationSystem();
 		qis.setIsLookUp(ISLookupClientFactory.getLookUpService(isLookUpUrl));
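A note on the defaults of the two new arguments: Boolean.valueOf(null) is false and the Optional chain falls back to null, so a run that supplies neither newDeposition nor conceptRecordId (assuming parser.get(...) returns null for an argument that was not passed) ends up in the new-version branch and then fails with MissingConceptDoiException. A small self-contained check of that parsing behaviour:

import java.util.Optional;

// Minimal check of how the two new arguments default when they are not supplied.
public class DepositionArgsDefaults {

	public static void main(String[] args) {
		String newDepositionArg = null; // as if -newDeposition were not passed
		String conceptRecordIdArg = null; // as if -conceptRecordId were not passed

		Boolean newDeposition = Boolean.valueOf(newDepositionArg); // Boolean.valueOf(null) -> false
		String conceptRecordId = Optional.ofNullable(conceptRecordIdArg).orElse(null);

		System.out.println(newDeposition); // false -> the job would take the "new version" branch
		System.out.println(conceptRecordId); // null -> ... and then throw MissingConceptDoiException
	}
}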
@@ -46,8 +52,16 @@ public class SendToZenodoHDFS implements Serializable {
 		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
 			.listFiles(
 				new Path(hdfsPath), true);
-		APIClient apiClient = new APIClient(connection_url, access_token);
-		apiClient.connect();
+		ZenodoAPIClient zenodoApiClient = new ZenodoAPIClient(connection_url, access_token);
+		if (newDeposition){
+			zenodoApiClient.newDeposition();
+		}else{
+			if (concept_rec_id == null){
+				throw new MissingConceptDoiException("No concept record id has been provided");
+			}
+			zenodoApiClient.newVersion(concept_rec_id);
+		}
 
 		while (fileStatusListIterator.hasNext()) {
 			LocatedFileStatus fileStatus = fileStatusListIterator.next();
 
@@ -58,22 +72,16 @@ public class SendToZenodoHDFS implements Serializable {
 				String community = tmp.substring(tmp.lastIndexOf("/") + 1);
 				log.info("Sending information for community: " + community);
 				String community_name = communityMap.get(community).replace(" ", "_") + ".json.gz";
-				//log.info("Copying information for community: " + community);
-				//fileSystem.copyToLocalFile(p, new Path("/tmp/" + community_name));
-				//File f = new File("/tmp/" + community_name);
-				try {
-					FSDataInputStream inputStream = fileSystem.open(p);
-					apiClient.uploadIS(inputStream, community_name);
-
-				} catch(Exception e){
-
-				}
+				FSDataInputStream inputStream = fileSystem.open(p);
+				zenodoApiClient.uploadIS(inputStream, community_name, fileStatus.getLen());
 
 			}
 
 		}
 
-		apiClient.sendMretadata(metadata);
-		apiClient.publish();
+		zenodoApiClient.sendMretadata(metadata);
+		zenodoApiClient.publish();
 
 	}
 
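Taken together, the new flow streams each community dump file straight from HDFS into the Zenodo deposition, passing the file length that uploadIS now expects, then sends the metadata and publishes. A rough end-to-end sketch under the same assumptions as above (placeholder name node, connection URL, token and metadata; the real job derives the upload file name from the community map rather than from the path):

// Sketch only: name node, URL, token, paths and metadata are placeholders;
// the ZenodoAPIClient and FileSystem calls are the ones used in the class above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

import eu.dnetlib.dhp.common.api.MissingConceptDoiException;
import eu.dnetlib.dhp.common.api.ZenodoAPIClient;

public class StreamDumpToZenodo {

	public static void main(String[] args) throws Exception, MissingConceptDoiException {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://namenode:8020"); // placeholder name node

		FileSystem fileSystem = FileSystem.get(conf);
		ZenodoAPIClient zenodoApiClient = new ZenodoAPIClient(
			"https://sandbox.zenodo.org/api/deposit/depositions", // placeholder connection URL
			"ACCESS_TOKEN"); // placeholder access token

		zenodoApiClient.newDeposition(); // or newVersion(conceptRecordId) for an existing deposition

		// stream every file under the dump directory straight into the deposition
		RemoteIterator<LocatedFileStatus> it = fileSystem.listFiles(new Path("/tmp/dump"), true);
		while (it.hasNext()) {
			LocatedFileStatus fileStatus = it.next();
			Path p = fileStatus.getPath();
			String name = p.getName() + ".json.gz"; // simplified naming; the job uses the community map
			try (FSDataInputStream inputStream = fileSystem.open(p)) {
				zenodoApiClient.uploadIS(inputStream, name, fileStatus.getLen());
			}
		}

		zenodoApiClient.sendMretadata("{\"metadata\": {\"title\": \"...\"}}"); // placeholder metadata JSON
		zenodoApiClient.publish();
	}
}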