forked from D-Net/dnet-hadoop

updated stuff for zenodo upload

parent eeebd5a920 · commit 264723ffd8
@@ -42,21 +42,32 @@ class QueryTest {
 	}
 
+	def extractId(input: String): String = {
+		implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
+		lazy val json: json4s.JValue = parse(input)
+		(json \ "id").extractOrElse[String](null)
+	}
+
 	def myQuery(spark: SparkSession, sc: SparkContext): Unit = {
 		implicit val mapEncoderPub: Encoder[Publication] = Encoders.kryo[Publication]
 
 		val mapper = new ObjectMapper()
 		mapper.getSerializationConfig.enable(SerializationConfig.Feature.INDENT_OUTPUT)
 
 		val ds: Dataset[Publication] = spark.read.load("/tmp/p").as[Publication]
 
+		val sc = spark.sparkContext
+
 		ds.filter(p => p.getBestaccessright != null && p.getBestaccessright.getClassname.nonEmpty).count()
 
+		val typologies = List("dataset", "datasource", "organization", "otherresearchproduct", "project", "publication", "software")
+		val basePath = "/tt"
+
+		typologies.map(tp => sc.textFile(s"$basePath/dataset").map(s => extractId(tp)).distinct.count()).sum()
 	}
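The new `extractId` helper is plain json4s: parse the record, pull the top-level `id` field, and fall back to `null` when it is missing. A minimal self-contained sketch of how it behaves (the sample JSON strings are made up for illustration):

import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

object ExtractIdDemo {

  implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats

  // Same logic as the helper added above: return the top-level "id", or null if absent.
  def extractId(input: String): String = {
    lazy val json: json4s.JValue = parse(input)
    (json \ "id").extractOrElse[String](null)
  }

  def main(args: Array[String]): Unit = {
    println(extractId("""{"id":"50|fakeId::1234"}""")) // prints 50|fakeId::1234
    println(extractId("""{"title":"no id field"}"""))  // prints null
  }
}

Note that, as committed, the final line of `myQuery` reads `s"$basePath/dataset"` for every typology and extracts from `tp` (the typology name) rather than from the record `s`; presumably `s"$basePath/$tp"` and `extractId(s)` were intended.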
@@ -12,6 +12,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.common.MakeTarArchive;
 
 public class MakeTar implements Serializable {
 
@@ -41,71 +42,71 @@ public class MakeTar implements Serializable {
 
 		FileSystem fileSystem = FileSystem.get(conf);
 
-		makeTArArchive(fileSystem, inputPath, outputPath);
+		MakeTarArchive.tarMaxSize(fileSystem, inputPath, outputPath, "scholix_dump", 25);
 
 	}
 
-	public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath) throws IOException {
-
-		RemoteIterator<LocatedFileStatus> dir_iterator = fileSystem.listLocatedStatus(new Path(inputPath));
-
-		while (dir_iterator.hasNext()) {
-			LocatedFileStatus fileStatus = dir_iterator.next();
-
-			Path p = fileStatus.getPath();
-			String p_string = p.toString();
-			String entity = p_string.substring(p_string.lastIndexOf("/") + 1);
-
-			write(fileSystem, p_string, outputPath + "/" + entity + ".tar", entity);
-		}
-
-	}
-
-	private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dir_name)
-		throws IOException {
-
-		Path hdfsWritePath = new Path(outputPath);
-		FSDataOutputStream fsDataOutputStream = null;
-		if (fileSystem.exists(hdfsWritePath)) {
-			fileSystem.delete(hdfsWritePath, true);
-
-		}
-		fsDataOutputStream = fileSystem.create(hdfsWritePath);
-
-		TarArchiveOutputStream ar = new TarArchiveOutputStream(fsDataOutputStream.getWrappedStream());
-
-		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
-			.listFiles(
-				new Path(inputPath), true);
-
-		while (fileStatusListIterator.hasNext()) {
-			LocatedFileStatus fileStatus = fileStatusListIterator.next();
-
-			Path p = fileStatus.getPath();
-			String p_string = p.toString();
-			if (!p_string.endsWith("_SUCCESS")) {
-				String name = p_string.substring(p_string.lastIndexOf("/") + 1);
-				TarArchiveEntry entry = new TarArchiveEntry(dir_name + "/" + name + ".json.gz");
-				entry.setSize(fileStatus.getLen());
-				ar.putArchiveEntry(entry);
-
-				InputStream is = fileSystem.open(fileStatus.getPath());
-
-				BufferedInputStream bis = new BufferedInputStream(is);
-
-				int count;
-				byte data[] = new byte[1024];
-				while ((count = bis.read(data, 0, data.length)) != -1) {
-					ar.write(data, 0, count);
-				}
-				bis.close();
-				ar.closeArchiveEntry();
-
-			}
-
-		}
-
-		ar.close();
-	}
+//	public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath) throws IOException {
+//
+//		RemoteIterator<LocatedFileStatus> dir_iterator = fileSystem.listLocatedStatus(new Path(inputPath));
+//
+//		while (dir_iterator.hasNext()) {
+//			LocatedFileStatus fileStatus = dir_iterator.next();
+//
+//			Path p = fileStatus.getPath();
+//			String p_string = p.toString();
+//			String entity = p_string.substring(p_string.lastIndexOf("/") + 1);
+//
+//			write(fileSystem, p_string, outputPath + "/" + entity + ".tar", entity);
+//		}
+//
+//	}
+//
+//	private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dir_name)
+//		throws IOException {
+//
+//		Path hdfsWritePath = new Path(outputPath);
+//		FSDataOutputStream fsDataOutputStream = null;
+//		if (fileSystem.exists(hdfsWritePath)) {
+//			fileSystem.delete(hdfsWritePath, true);
+//
+//		}
+//		fsDataOutputStream = fileSystem.create(hdfsWritePath);
+//
+//		TarArchiveOutputStream ar = new TarArchiveOutputStream(fsDataOutputStream.getWrappedStream());
+//
+//		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
+//			.listFiles(
+//				new Path(inputPath), true);
+//
+//		while (fileStatusListIterator.hasNext()) {
+//			LocatedFileStatus fileStatus = fileStatusListIterator.next();
+//
+//			Path p = fileStatus.getPath();
+//			String p_string = p.toString();
+//			if (!p_string.endsWith("_SUCCESS")) {
+//				String name = p_string.substring(p_string.lastIndexOf("/") + 1);
+//				TarArchiveEntry entry = new TarArchiveEntry(dir_name + "/" + name + ".json.gz");
+//				entry.setSize(fileStatus.getLen());
+//				ar.putArchiveEntry(entry);
+//
+//				InputStream is = fileSystem.open(fileStatus.getPath());
+//
+//				BufferedInputStream bis = new BufferedInputStream(is);
+//
+//				int count;
+//				byte data[] = new byte[1024];
+//				while ((count = bis.read(data, 0, data.length)) != -1) {
+//					ar.write(data, 0, count);
+//				}
+//				bis.close();
+//				ar.closeArchiveEntry();
+//
+//			}
+//
+//		}
+//
+//		ar.close();
+//	}
 
 }
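The substantive change here swaps the class-local `makeTArArchive`/`write` pair for the shared `MakeTarArchive.tarMaxSize(fileSystem, inputPath, outputPath, "scholix_dump", 25)`. That implementation is not part of this diff; judging from the call site and the removed helper, it writes the dump as a series of tar parts, rolling over to a new archive once a size threshold is reached (the 25 presumably being gigabytes). A hedged sketch of the idea, adapted from the removed `write()` helper — the part-naming scheme, the GB unit, and all names below are assumptions, not the actual `MakeTarArchive` code:

import org.apache.commons.compress.archivers.tar.{TarArchiveEntry, TarArchiveOutputStream}
import org.apache.hadoop.fs.{FileSystem, Path}

object TarMaxSizeSketch {

  // Roughly what a size-capped variant of the removed write() helper could look like.
  def tarMaxSize(fs: FileSystem, inputPath: String, outputPath: String,
                 dirName: String, maxSizeGb: Int): Unit = {
    val maxBytes = maxSizeGb.toLong * 1024 * 1024 * 1024
    var part = 0
    var written = 0L
    var ar = newPart(fs, outputPath, part)

    val files = fs.listFiles(new Path(inputPath), true)
    while (files.hasNext) {
      val status = files.next()
      val name = status.getPath.getName
      if (!name.endsWith("_SUCCESS")) {
        // Roll over to a fresh tar once this file would push the part past the cap.
        if (written + status.getLen > maxBytes) {
          ar.close(); part += 1; written = 0L
          ar = newPart(fs, outputPath, part)
        }
        val entry = new TarArchiveEntry(s"$dirName/$name") // the removed helper also appended ".json.gz"
        entry.setSize(status.getLen)
        ar.putArchiveEntry(entry)
        val in = fs.open(status.getPath)
        val buf = new Array[Byte](1024)
        Iterator.continually(in.read(buf)).takeWhile(_ != -1).foreach(n => ar.write(buf, 0, n))
        in.close()
        ar.closeArchiveEntry()
        written += status.getLen
      }
    }
    ar.close()
  }

  // Hypothetical part naming: outputPath_0.tar, outputPath_1.tar, ...
  private def newPart(fs: FileSystem, outputPath: String, part: Int): TarArchiveOutputStream = {
    val p = new Path(s"${outputPath}_$part.tar")
    if (fs.exists(p)) fs.delete(p, true)
    new TarArchiveOutputStream(fs.create(p).getWrappedStream)
  }
}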
@@ -73,7 +73,7 @@ public class SendToZenodoHDFS implements Serializable {
 		}
 
 		zenodoApiClient.sendMretadata(metadata);
-		zenodoApiClient.publish();
+//		zenodoApiClient.publish();
 
 	}
 
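With `publish()` commented out, the run still uploads the files and sends the metadata, but leaves the Zenodo deposition as an unpublished draft, so it can be reviewed in the Zenodo UI and published manually. (`sendMretadata` is the method's actual, if misspelled, name in the upstream Zenodo API client, so it is correctly left untouched here.)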
@@ -8,10 +8,10 @@
         <name>targetPath</name>
         <description>the target path</description>
     </property>
-    <property>
-        <name>metadata</name>
-        <description>the metadata</description>
-    </property>
+<!--    <property>-->
+<!--        <name>metadata</name>-->
+<!--        <description>the metadata</description>-->
+<!--    </property>-->
 </parameters>
 
 <start to="send_zenodo"/>
@@ -34,20 +34,20 @@
     </action>
 
 
-    <action name="send_zenodo">
-        <java>
-            <main-class>eu.dnetlib.dhp.export.zenodo.SendToZenodoHDFS</main-class>
-            <arg>--hdfsPath</arg><arg>/user/dnet.scholexplorer/scholix/provision/scholix.tar/scholix-2020-10-16.tar</arg>
-            <arg>--nameNode</arg><arg>${nameNode}</arg>
-            <arg>--accessToken</arg><arg>b6ddrY6b77WxcDEevn9gqVE5sL5sDNjdUijt75W3o7cQo5vpFFI48dMiu8Gv</arg>
-            <arg>--connectionUrl</arg><arg>https://zenodo.org/api/deposit/depositions</arg>
-            <arg>--metadata</arg><arg>${metadata}</arg>
-            <arg>--conceptRecordId</arg><arg>1200252</arg>
-            <arg>--newDeposition</arg><arg>false</arg>
-        </java>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
+<!--    <action name="send_zenodo">-->
+<!--        <java>-->
+<!--            <main-class>eu.dnetlib.dhp.export.zenodo.SendToZenodoHDFS</main-class>-->
+<!--            <arg>--hdfsPath</arg><arg>/user/dnet.scholexplorer/scholix/provision/scholix.tar/scholix-2020-10-16.tar</arg>-->
+<!--            <arg>--nameNode</arg><arg>${nameNode}</arg>-->
+<!--            <arg>--accessToken</arg><arg>b6ddrY6b77WxcDEevn9gqVE5sL5sDNjdUijt75W3o7cQo5vpFFI48dMiu8Gv</arg>-->
+<!--            <arg>--connectionUrl</arg><arg>https://zenodo.org/api/deposit/depositions</arg>-->
+<!--            <arg>--metadata</arg><arg>${metadata}</arg>-->
+<!--            <arg>--conceptRecordId</arg><arg>1200252</arg>-->
+<!--            <arg>--newDeposition</arg><arg>false</arg>-->
+<!--        </java>-->
+<!--        <ok to="End"/>-->
+<!--        <error to="Kill"/>-->
+<!--    </action>-->
 
     <end name="End"/>
 </workflow-app>
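A few things worth flagging in the workflow changes: `<start to="send_zenodo"/>` is left pointing at the now commented-out action, so as committed the workflow would presumably fail validation unless the start transition is redirected to the remaining action (not visible in this diff); several of the commented lines contain `--` in the CLI flags, which is not legal inside an XML comment and may trip the workflow parser; and the plain-text `--accessToken` value remains in the file's history, so it should be treated as compromised and rotated.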