forked from D-Net/dnet-hadoop
This commit is contained in:
parent edeb862476
commit e8f914f8b3
@@ -80,7 +80,7 @@
         </configuration>
     </global>

-    <start to="reset_outputpath"/>
+    <start to="send_zenodo"/>

     <kill name="Kill">
         <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
@@ -366,146 +366,19 @@
             <error to="Kill"/>
         </action>

-        <!-- <join name="join_extend" to="fork_splitForCommunities"/>-->
-        <!-- <fork name="fork_splitForCommunities">-->
-        <!-- <path start="split_publication"/>-->
-        <!-- <path start="split_dataset"/>-->
-        <!-- <path start="split_orp"/>-->
-        <!-- <path start="split_software"/>-->
-        <!-- </fork>-->
-
-        <!-- <action name="split_publication">-->
-        <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
-        <!-- <master>yarn</master>-->
-        <!-- <mode>cluster</mode>-->
-        <!-- <name>Split dumped result for community</name>-->
-        <!-- <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>-->
-        <!-- <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
-        <!-- <spark-opts>-->
-        <!-- --executor-memory=${sparkExecutorMemory}-->
-        <!-- --executor-cores=${sparkExecutorCores}-->
-        <!-- --driver-memory=${sparkDriverMemory}-->
-        <!-- --conf spark.extraListeners=${spark2ExtraListeners}-->
-        <!-- --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
-        <!-- --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
-        <!-- --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
-        <!-- --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
-        <!-- </spark-opts>-->
-        <!-- <arg>--sourcePath</arg><arg>${workingDir}/ext/publication</arg>-->
-        <!-- <arg>--outputPath</arg><arg>${outputPath}</arg>-->
-        <!-- <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>-->
-        <!-- <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>-->
-        <!-- </spark>-->
-        <!-- <ok to="join_split"/>-->
-        <!-- <error to="Kill"/>-->
-        <!-- </action>-->
-
-        <!-- <action name="split_dataset">-->
-        <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
-        <!-- <master>yarn</master>-->
-        <!-- <mode>cluster</mode>-->
-        <!-- <name>Split dumped result for community</name>-->
-        <!-- <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>-->
-        <!-- <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
-        <!-- <spark-opts>-->
-        <!-- --executor-memory=${sparkExecutorMemory}-->
-        <!-- --executor-cores=${sparkExecutorCores}-->
-        <!-- --driver-memory=${sparkDriverMemory}-->
-        <!-- --conf spark.extraListeners=${spark2ExtraListeners}-->
-        <!-- --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
-        <!-- --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
-        <!-- --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
-        <!-- --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
-        <!-- </spark-opts>-->
-        <!-- <arg>--sourcePath</arg><arg>${workingDir}/ext/dataset</arg>-->
-        <!-- <arg>--outputPath</arg><arg>${outputPath}</arg>-->
-        <!-- <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Dataset</arg>-->
-        <!-- <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>-->
-        <!-- </spark>-->
-        <!-- <ok to="join_split"/>-->
-        <!-- <error to="Kill"/>-->
-        <!-- </action>-->
-
-        <!-- <action name="split_orp">-->
-        <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
-        <!-- <master>yarn</master>-->
-        <!-- <mode>cluster</mode>-->
-        <!-- <name>Split dumped result for community</name>-->
-        <!-- <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>-->
-        <!-- <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
-        <!-- <spark-opts>-->
-        <!-- --executor-memory=${sparkExecutorMemory}-->
-        <!-- --executor-cores=${sparkExecutorCores}-->
-        <!-- --driver-memory=${sparkDriverMemory}-->
-        <!-- --conf spark.extraListeners=${spark2ExtraListeners}-->
-        <!-- --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
-        <!-- --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
-        <!-- --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
-        <!-- --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
-        <!-- </spark-opts>-->
-        <!-- <arg>--sourcePath</arg><arg>${workingDir}/ext/orp</arg>-->
-        <!-- <arg>--outputPath</arg><arg>${outputPath}</arg>-->
-        <!-- <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.OtherResearchProduct</arg>-->
-        <!-- <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>-->
-        <!-- </spark>-->
-        <!-- <ok to="join_split"/>-->
-        <!-- <error to="Kill"/>-->
-        <!-- </action>-->
-
-        <!-- <action name="split_software">-->
-        <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
-        <!-- <master>yarn</master>-->
-        <!-- <mode>cluster</mode>-->
-        <!-- <name>Split dumped result for community</name>-->
-        <!-- <class>eu.dnetlib.dhp.oa.graph.dump.SparkSplitForCommunity</class>-->
-        <!-- <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
-        <!-- <spark-opts>-->
-        <!-- --executor-memory=${sparkExecutorMemory}-->
-        <!-- --executor-cores=${sparkExecutorCores}-->
-        <!-- --driver-memory=${sparkDriverMemory}-->
-        <!-- --conf spark.extraListeners=${spark2ExtraListeners}-->
-        <!-- --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
-        <!-- --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
-        <!-- --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
-        <!-- --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
-        <!-- </spark-opts>-->
-        <!-- <arg>--sourcePath</arg><arg>${workingDir}/ext/software</arg>-->
-        <!-- <arg>--outputPath</arg><arg>${outputPath}</arg>-->
-        <!-- <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Software</arg>-->
-        <!-- <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>-->
-        <!-- </spark>-->
-        <!-- <ok to="join_split"/>-->
-        <!-- <error to="Kill"/>-->
-        <!-- </action>-->
-
-        <!-- <join name="join_split" to="loadInZenodo"/>-->
-        <!-- <join name="join_split" to="End"/>-->
-
-        <!-- <action name="loadInZenodo">-->
-        <!-- <spark xmlns="uri:oozie:spark-action:0.2">-->
-        <!-- <master>yarn</master>-->
-        <!-- <mode>cluster</mode>-->
-        <!-- <name>Import table software</name>-->
-        <!-- <class>eu.dnetlib.dhp.oa.graph.hive.GraphHiveTableImporterJob</class>-->
-        <!-- <jar>dhp-graph-mapper-${projectVersion}.jar</jar>-->
-        <!-- <spark-opts>-->
-        <!-- --executor-memory=${sparkExecutorMemory}-->
-        <!-- --executor-cores=${sparkExecutorCores}-->
-        <!-- --driver-memory=${sparkDriverMemory}-->
-        <!-- --conf spark.extraListeners=${spark2ExtraListeners}-->
-        <!-- --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}-->
-        <!-- --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}-->
-        <!-- --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}-->
-        <!-- --conf spark.sql.warehouse.dir=${sparkSqlWarehouseDir}-->
-        <!-- </spark-opts>-->
-        <!-- <arg>--inputPath</arg><arg>${workingDir}/ext/publication</arg>-->
-        <!-- <arg>--hiveDbName</arg><arg>${hiveDbName}</arg>-->
-        <!-- <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.dump.oaf.Publication</arg>-->
-        <!-- -->
-        <!-- </spark>-->
-        <!-- <ok to="End"/>-->
-        <!-- <error to="Kill"/>-->
-        <!-- </action>-->
+        <action name="send_zenodo">
+            <java>
+                <main-class>eu.dnetlib.dhp.oa.graph.dump.SendToZenodo</main-class>
+                <arg>--hdfsPath</arg><arg>${outputPath}</arg>
+                <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
+                <arg>--accessToken</arg><arg>${accessToken}</arg>
+                <arg>--connectionUrl</arg><arg>${connectionUrl}</arg>
+                <arg>--metadata</arg><arg>${metadata}</arg>
+                <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+            </java>
+            <ok to="End"/>
+            <error to="Kill"/>
+        </action>

     <end name="End"/>
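Note on the new step: send_zenodo is a plain Oozie java action, so eu.dnetlib.dhp.oa.graph.dump.SendToZenodo presumably reads the --hdfsPath, --hdfsNameNode, --accessToken, --connectionUrl and --metadata arguments, walks the dump folder on HDFS and pushes each file to a Zenodo deposition. The class itself is not part of this diff; the following is only a minimal sketch of such a driver, assuming the APIClient constructor and the connect/upload/sendMretadata/publish calls exercised by ZenodoUploadTest below, with argument handling simplified to positional parameters.

    // Hedged sketch only: the real SendToZenodo lives in dnet-hadoop and may differ.
    // The actual job parses the named --options declared in the workflow and in the
    // parameter file below; here they are read positionally for brevity.
    package eu.dnetlib.dhp.oa.graph.dump;

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class SendToZenodoSketch {

        public static void main(String[] args) throws Exception {
            String hdfsPath = args[0];      // --hdfsPath
            String hdfsNameNode = args[1];  // --hdfsNameNode
            String accessToken = args[2];   // --accessToken
            String connectionUrl = args[3]; // --connectionUrl
            String metadata = args[4];      // --metadata (JSON describing the deposition)

            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", hdfsNameNode);
            FileSystem fs = FileSystem.get(conf);

            // APIClient usage mirrors ZenodoUploadTest: connect, upload each file,
            // attach the metadata, then publish the deposition.
            APIClient client = new APIClient(connectionUrl, accessToken);
            client.connect();

            // Walk the dump recursively and push every file found under hdfsPath.
            RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(hdfsPath), true);
            while (it.hasNext()) {
                LocatedFileStatus status = it.next();
                File local = new File("/tmp/" + status.getPath().getName());
                fs.copyToLocalFile(status.getPath(), new Path(local.getAbsolutePath()));
                client.upload(local, status.getPath().getName());
                local.delete();
            }

            client.sendMretadata(metadata); // method name as it appears in the test below
            client.publish();
        }
    }

The workflow also passes --isLookUpUrl; it is left out of the sketch because nothing here queries the IS lookup service.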
@@ -0,0 +1,39 @@
+[
+  {
+    "paramName": "is",
+    "paramLongName": "isLookUpUrl",
+    "paramDescription": "URL of the isLookUp Service",
+    "paramRequired": true
+  },
+  {
+    "paramName": "hdfsp",
+    "paramLongName": "hdfsPath",
+    "paramDescription": "the path of the folder to find files to send to Zenodo",
+    "paramRequired": true
+  },
+  {
+    "paramName": "hdfsnn",
+    "paramLongName": "hdfsNameNode",
+    "paramDescription": "the name node",
+    "paramRequired": true
+  },
+  {
+    "paramName": "at",
+    "paramLongName": "accessToken",
+    "paramDescription": "the access token for the deposition",
+    "paramRequired": false
+  },
+  {
+    "paramName": "cu",
+    "paramLongName": "connectionUrl",
+    "paramDescription": "the url to connect to deposit",
+    "paramRequired": false
+  },
+  {
+    "paramName": "m",
+    "paramLongName": "metadata",
+    "paramDescription": "metadata associated to the deposition",
+    "paramRequired": false
+  }
+]
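Each entry declares one CLI option for the upload step: a short name, a long name, a human-readable description and a required flag. A hedged sketch of how such a definition file could be loaded with Gson and used to validate the supplied arguments follows; the project may use its own argument parser, and the class and resource names here are illustrative only.

    // Hedged sketch: load the parameter definitions above with Gson and check that every
    // required option was supplied. ParamDef and upload_zenodo_parameters.json are
    // illustrative names, not identifiers taken from the repository.
    import java.io.InputStreamReader;
    import java.io.Reader;
    import java.util.Arrays;
    import java.util.List;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    public class ZenodoParamCheck {

        static class ParamDef {
            String paramName;       // short option, e.g. "at"
            String paramLongName;   // long option, e.g. "accessToken"
            String paramDescription;
            boolean paramRequired;
        }

        public static void main(String[] args) throws Exception {
            // Read the JSON definition file from the classpath.
            Reader reader = new InputStreamReader(
                ZenodoParamCheck.class.getResourceAsStream("/upload_zenodo_parameters.json"));
            List<ParamDef> defs = new Gson()
                .fromJson(reader, new TypeToken<List<ParamDef>>() {}.getType());

            // Fail fast when a required --option is missing from the command line.
            List<String> argList = Arrays.asList(args);
            for (ParamDef d : defs) {
                boolean present = argList.contains("--" + d.paramLongName);
                if (d.paramRequired && !present) {
                    throw new IllegalArgumentException("missing required option --" + d.paramLongName);
                }
            }
        }
    }

With the definitions above, a call without --isLookUpUrl, --hdfsPath or --hdfsNameNode would trigger the missing-option error, while the token, connection URL and metadata options remain optional.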
@@ -1,54 +1,180 @@

 package eu.dnetlib.dhp.oa.graph.dump;

-import java.io.IOException;
+import java.io.*;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.Arrays;

+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.spark.SparkConf;
+import org.apache.spark.sql.SparkSession;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;

 import com.google.gson.Gson;

 import eu.dnetlib.dhp.oa.graph.dump.zenodo.Creator;
 import eu.dnetlib.dhp.oa.graph.dump.zenodo.Metadata;
 import eu.dnetlib.dhp.oa.graph.dump.zenodo.ZenodoModel;
-import org.apache.commons.io.IOUtils;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Test;

 public class ZenodoUploadTest {

+    private static String workingDir;
+
+    // private static FileSystem fileSystem;
+    //
+    @BeforeAll
+    public static void beforeAll() throws IOException {
+        workingDir = Files
+            .createTempDirectory(eu.dnetlib.dhp.oa.graph.dump.UpdateProjectInfoTest.class.getSimpleName())
+            .toString();
+    }
+    //
+    // Configuration conf = new Configuration();
+    // conf.set("fs.defaultFS", "localhost");
+    //
+    // fileSystem = FileSystem.get(conf);
+    //
+    // FSDataOutputStream fsDataOutputStream = fileSystem.create(new org.apache.hadoop.fs.Path(workingDir + "/ni"));
+    //
+    // BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
+    //
+    //
+    // writer.write(ZenodoUploadTest.class.getResourceAsStream("/eu/dnetlib/dhp/oa/graph/dump/zenodo/ni").toString());
+    // }
+
+    @Test
+    public void HDFSConnection() throws IOException {
+        CommunityMap communityMap = new CommunityMap();
+        communityMap.put("ni", "Neuroinformatics");
+        communityMap.put("dh-ch", "Digital Humanities and Cultural Heritage");
+        LocalFileSystem fs = FileSystem.getLocal(new Configuration());
+
+        fs
+            .copyFromLocalFile(
+                false, new Path(getClass()
+                    .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/ni")
+                    .getPath()),
+                new Path(workingDir + "/zenodo/ni/ni"));
+        fs
+            .copyFromLocalFile(
+                false, new Path(getClass()
+                    .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/dh-ch")
+                    .getPath()),
+                new Path(workingDir + "/zenodo/dh-ch/dh-ch"));
+
+        System.out.println("pr");
+
+        // Configuration conf = new Configuration();
+        // conf.set("fs.defaultFS", "localhost");
+        //
+        // APIClient s = new APIClient(
+        // "https://sandbox.zenodo.org/api/deposit/depositions");
+        //
+        // s.connect();
+        // s.upload(workingDir +"/ni", "Neuroinformatics", fs);
+
+        APIClient client = new APIClient("https://sandbox.zenodo.org/api/deposit/depositions",
+            "5ImUj0VC1ICg4ifK5dc3AGzJhcfAB4osxrFlsr8WxHXxjaYgCE0hY8HZcDoe");
+        client.connect();
+
+        // the second boolean parameter here sets the recursion to true
+        RemoteIterator<LocatedFileStatus> fileStatusListIterator = fs
+            .listFiles(
+                new Path(workingDir + "/zenodo"), true);
+        while (fileStatusListIterator.hasNext()) {
+            LocatedFileStatus fileStatus = fileStatusListIterator.next();
+            // do stuff with the file like ...
+
+            // BufferedInputStream bis = new BufferedInputStream(fs.open( fileStatus.getPath()));
+            String p_string = fileStatus.getPath().toString();
+
+            int index = p_string.lastIndexOf("/");
+            String community = p_string.substring(0, index);
+            community = community.substring(community.lastIndexOf("/") + 1);
+            String community_name = communityMap.get(community).replace(" ", "_");
+            fs.copyToLocalFile(fileStatus.getPath(), new Path("/tmp/" + community_name));
+            System.out.println(community);
+            // System.out.println(client.upload(bis, community));
+
+            File f = new File("/tmp/" + community_name);
+            client.upload(f, community_name);
+
+            if (f.exists()) {
+                f.delete();
+            }
+
+        }
+
+        ZenodoModel zenodo = new ZenodoModel();
+        Metadata data = new Metadata();
+
+        data.setTitle("Dump of OpenAIRE Communities related graph");
+        data.setUpload_type("dataset");
+        data.setDescription("this is a fake uploade done for testing purposes");
+        Creator c = new Creator();
+        c.setName("Miriam Baglioni");
+        c.setAffiliation("CNR _ISTI");
+        data.setCreators(Arrays.asList(c));
+        zenodo.setMetadata(data);
+
+        System.out.println(client.sendMretadata(new Gson().toJson(zenodo)));
+
+        System.out.println(client.publish());
+
+    }
+
+    @Test
+    public void serializeMetadata() {
+        ZenodoModel zenodo = new ZenodoModel();
+        Metadata data = new Metadata();
+
+        data.setTitle("Dump of OpenAIRE Communities related graph");
+        data.setUpload_type("dataset");
+        data.setDescription("this is a fake uploade done for testing purposes");
+        Creator c = new Creator();
+        c.setName("Miriam Baglioni");
+        c.setAffiliation("CNR _ISTI");
+        data.setCreators(Arrays.asList(c));
+        zenodo.setMetadata(data);
+
+        System.out.println(new Gson().toJson(zenodo));
+
+    }
+
     @Test
     public void testConnection() throws IOException {

+        // InputStream is = getClass().getClassLoader().getResourceAsStream("eu/dnetlib/dhp/oa/graph/dump/zenodo/ni");
         APIClient s = new APIClient(
-            // "https://sandbox.zenodo.org/api/deposit/depositions?access_token=5ImUj0VC1ICg4ifK5dc3AGzJhcfAB4osxrFlsr8WxHXxjaYgCE0hY8HZcDoe");
-            "https://sandbox.zenodo.org/api/deposit/depositions");
+            "https://sandbox.zenodo.org/api/deposit/depositions",
+            "5ImUj0VC1ICg4ifK5dc3AGzJhcfAB4osxrFlsr8WxHXxjaYgCE0hY8HZcDoe");

         Assertions.assertEquals(201, s.connect());

-        s.upload(getClass()
+        s
+            .upload(
+                new File(getClass()
             .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/ni")
-            .getPath(), "Neuroinformatics");
+                    .getPath()),
+                "Neuroinformatics");

-        s.upload(getClass()
-            .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/dh-ch")
-            .getPath(), "DigitalHumanitiesandCulturalHeritage");
+        // s.upload(getClass()
+        // .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/dh-ch")
+        // .getPath(), "DigitalHumanitiesandCulturalHeritage");
+        //
+        // s.upload(getClass()
+        // .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/egi")
+        // .getPath(), "EGI");
+        //
+        // s.upload(getClass()
+        // .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/science-innovation-policy")
+        // .getPath(), "ScienceandInnovationPolicyStudies");

-        s.upload(getClass()
-            .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/egi")
-            .getPath(), "EGI");
-
-        s.upload(getClass()
-            .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/science-innovation-policy")
-            .getPath(), "ScienceandInnovationPolicyStudies");
-
-        //
-        //
-        // String data = "{\"metadata\": {\"title\": \"My first upload\", " +
-        // "\"upload_type\": \"poster\", " +
-        // "\"description\": \"This is my first upload\", " +
-        // "\"creators\": [{\"name\": \"Doe, John\", " +
-        // "\"affiliation': 'Zenodo'}]
-        //... }
-        //... }
-        //
-        //

         ZenodoModel zenodo = new ZenodoModel();
         Metadata data = new Metadata();
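The loop in HDFSConnection derives the community identifier from the next-to-last segment of each HDFS path (.../zenodo/<community>/<file>) and then maps it to a printable name taken from the CommunityMap. The same path handling, isolated as a small self-contained sketch with the two map entries used by the test (the sample path value is illustrative):

    // Self-contained rewrite of the path parsing used in HDFSConnection above;
    // the map entries come from the test, the sample path is only an example.
    import java.util.HashMap;
    import java.util.Map;

    public class CommunityPathParsing {

        public static void main(String[] args) {
            Map<String, String> communityMap = new HashMap<>();
            communityMap.put("ni", "Neuroinformatics");
            communityMap.put("dh-ch", "Digital Humanities and Cultural Heritage");

            // Example of a path as produced in the test: workingDir + "/zenodo/ni/ni"
            String p = "file:/tmp/ZenodoUploadTest/zenodo/ni/ni";

            // Drop the file name, then keep the last remaining segment: the community id.
            String parent = p.substring(0, p.lastIndexOf("/"));
            String community = parent.substring(parent.lastIndexOf("/") + 1);

            // Spaces are replaced so the name can double as a local file name under /tmp.
            String communityName = communityMap.get(community).replace(" ", "_");

            System.out.println(community + " -> " + communityName); // ni -> Neuroinformatics
        }
    }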
@@ -67,24 +193,4 @@ public class ZenodoUploadTest {

     }

-    @Test
-    public void testPublish() throws IOException {
-        APIClient s = new APIClient("https://sandbox.zenodo.org/api/deposit/depositions");
-        s.publish();
-    }
-
-    @Test
-    public void testUpload() throws IOException {
-
-        APIClient s = new APIClient(
-            "https://sandbox.zenodo.org/api/deposit/depositions?access_token=5ImUj0VC1ICg4ifK5dc3AGzJhcfAB4osxrFlsr8WxHXxjaYgCE0hY8HZcDoe");
-        final String sourcePath = getClass()
-            .getResource("/eu/dnetlib/dhp/oa/graph/dump/zenodo/ni")
-            .getPath();
-
-        s.upload(sourcePath, "Neuroinformatics");
-
-    }
-
 }