forked from D-Net/dnet-hadoop

updated stuff for zenodo upload

parent eeebd5a920, commit 264723ffd8
@@ -42,22 +42,33 @@ class QueryTest {
 	}

-	def myQuery(spark: SparkSession, sc: SparkContext): Unit = {
-		implicit val mapEncoderPub: Encoder[Publication] = Encoders.kryo[Publication]
-
-		val mapper = new ObjectMapper()
-		mapper.getSerializationConfig.enable(SerializationConfig.Feature.INDENT_OUTPUT)
-
-		val ds: Dataset[Publication] = spark.read.load("/tmp/p").as[Publication]
-
-		ds.filter(p => p.getBestaccessright != null && p.getBestaccessright.getClassname.nonEmpty).count()
-	}
+	def extractId(input: String): String = {
+		implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
+		lazy val json: json4s.JValue = parse(input)
+		(json \ "id").extractOrElse[String](null)
+	}
+
+	def myQuery(spark: SparkSession, sc: SparkContext): Unit = {
+		implicit val mapEncoderPub: Encoder[Publication] = Encoders.kryo[Publication]
+
+		val mapper = new ObjectMapper()
+		mapper.getSerializationConfig.enable(SerializationConfig.Feature.INDENT_OUTPUT)
+
+		val ds: Dataset[Publication] = spark.read.load("/tmp/p").as[Publication]
+
+		ds.filter(p => p.getBestaccessright != null && p.getBestaccessright.getClassname.nonEmpty).count()
+
+		val typologies = List("dataset", "datasource", "organization", "otherresearchproduct", "project", "publication", "software")
+		val basePath = "/tt"
+
+		// count the distinct ids per typology, then sum the counts
+		typologies.map(tp => sc.textFile(s"$basePath/$tp").map(s => extractId(s)).distinct.count()).sum()
+	}
 }
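For orientation, extractId relies on json4s path extraction. A minimal, self-contained sketch of the same pattern (the sample records are invented for illustration, not from the commit):

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object ExtractIdSketch {
  implicit lazy val formats: DefaultFormats.type = DefaultFormats

  // parse one JSON record and pull the top-level "id", falling back to null
  def extractId(input: String): String =
    (parse(input) \ "id").extractOrElse[String](null)

  def main(args: Array[String]): Unit = {
    println(extractId("""{"id":"50|doi_________::ab12","resulttype":"publication"}""")) // prints 50|doi_________::ab12
    println(extractId("""{"title":"no id field"}"""))                                   // prints null
  }
}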
@@ -12,6 +12,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.common.MakeTarArchive;

 public class MakeTar implements Serializable {
@@ -41,71 +42,71 @@ public class MakeTar implements Serializable {

 		FileSystem fileSystem = FileSystem.get(conf);

-		makeTArArchive(fileSystem, inputPath, outputPath);
+		MakeTarArchive.tarMaxSize(fileSystem, inputPath, outputPath, "scholix_dump", 25);

 	}

 	public static void makeTArArchive(FileSystem fileSystem, String inputPath, String outputPath) throws IOException {

 		RemoteIterator<LocatedFileStatus> dir_iterator = fileSystem.listLocatedStatus(new Path(inputPath));

 		while (dir_iterator.hasNext()) {
 			LocatedFileStatus fileStatus = dir_iterator.next();

 			Path p = fileStatus.getPath();
 			String p_string = p.toString();
 			String entity = p_string.substring(p_string.lastIndexOf("/") + 1);

 			write(fileSystem, p_string, outputPath + "/" + entity + ".tar", entity);
 		}
 	}

 	private static void write(FileSystem fileSystem, String inputPath, String outputPath, String dir_name)
 		throws IOException {

 		Path hdfsWritePath = new Path(outputPath);
 		if (fileSystem.exists(hdfsWritePath)) {
 			fileSystem.delete(hdfsWritePath, true);
 		}
 		FSDataOutputStream fsDataOutputStream = fileSystem.create(hdfsWritePath);

 		TarArchiveOutputStream ar = new TarArchiveOutputStream(fsDataOutputStream.getWrappedStream());

 		RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
 			.listFiles(new Path(inputPath), true);

 		while (fileStatusListIterator.hasNext()) {
 			LocatedFileStatus fileStatus = fileStatusListIterator.next();

 			Path p = fileStatus.getPath();
 			String p_string = p.toString();
 			if (!p_string.endsWith("_SUCCESS")) {
 				String name = p_string.substring(p_string.lastIndexOf("/") + 1);
 				TarArchiveEntry entry = new TarArchiveEntry(dir_name + "/" + name + ".json.gz");
 				entry.setSize(fileStatus.getLen());
 				ar.putArchiveEntry(entry);

 				InputStream is = fileSystem.open(fileStatus.getPath());
 				BufferedInputStream bis = new BufferedInputStream(is);

 				int count;
 				byte[] data = new byte[1024];
 				while ((count = bis.read(data, 0, data.length)) != -1) {
 					ar.write(data, 0, count);
 				}
 				bis.close();
 				ar.closeArchiveEntry();
 			}
 		}

 		ar.close();
 	}
 }
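MakeTarArchive.tarMaxSize is referenced above but not part of this diff. A hedged sketch of what its name and arguments suggest: streaming part files into numbered tar archives and rolling to a new one once a size cap is exceeded. Every identifier below, and reading the 25 as gigabytes, is an assumption rather than the project's code:

import org.apache.commons.compress.archivers.tar.{TarArchiveEntry, TarArchiveOutputStream}
import org.apache.hadoop.fs.{FileSystem, Path}

object TarMaxSizeSketch {
  def tarMaxSize(fs: FileSystem, inputPath: String, outputPath: String,
                 dirName: String, maxSizeGb: Int): Unit = {
    val maxBytes = maxSizeGb.toLong * 1024 * 1024 * 1024
    var partNum  = 0
    var written  = 0L
    var ar       = newTar(fs, outputPath, partNum)
    val files    = fs.listFiles(new Path(inputPath), true)
    while (files.hasNext) {
      val status = files.next()
      val name   = status.getPath.getName
      if (!name.endsWith("_SUCCESS")) {
        // roll to the next tar once the current one would exceed the cap
        if (written > 0 && written + status.getLen > maxBytes) {
          ar.close(); partNum += 1; written = 0L
          ar = newTar(fs, outputPath, partNum)
        }
        val entry = new TarArchiveEntry(s"$dirName/$name.json.gz")
        entry.setSize(status.getLen)
        ar.putArchiveEntry(entry)
        val in  = fs.open(status.getPath)
        val buf = new Array[Byte](1024)
        Iterator.continually(in.read(buf)).takeWhile(_ != -1)
          .foreach(n => ar.write(buf, 0, n))
        in.close()
        ar.closeArchiveEntry()
        written += status.getLen
      }
    }
    ar.close()
  }

  // one tar per part, named <outputPath>_<part>.tar
  private def newTar(fs: FileSystem, outputPath: String, part: Int): TarArchiveOutputStream =
    new TarArchiveOutputStream(fs.create(new Path(s"${outputPath}_$part.tar")).getWrappedStream)
}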
@@ -73,7 +73,7 @@ public class SendToZenodoHDFS implements Serializable {
 		}

 		zenodoApiClient.sendMretadata(metadata);
-		zenodoApiClient.publish();
+		// zenodoApiClient.publish();

 	}
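The ZenodoAPIClient calls above (sendMretadata, spelling as it appears in the client, then publish) wrap Zenodo's public deposition REST API. A minimal sketch of that documented sequence; the HTTP handling and token lookup are illustrative only, not the project's client:

import java.net.URI
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

object ZenodoFlowSketch {
  def main(args: Array[String]): Unit = {
    val base  = "https://zenodo.org/api/deposit/depositions"
    val token = sys.env("ZENODO_TOKEN") // tokens belong in the environment, not in workflow XML
    val http  = HttpClient.newHttpClient()

    // 1) create an empty deposition; the response carries the deposition id and file-bucket URL
    val create = HttpRequest.newBuilder(URI.create(s"$base?access_token=$token"))
      .header("Content-Type", "application/json")
      .POST(HttpRequest.BodyPublishers.ofString("{}"))
      .build()
    println(http.send(create, HttpResponse.BodyHandlers.ofString()).body())

    // 2) PUT {base}/{id} with the metadata JSON, 3) upload the tar to the bucket URL,
    // 4) POST {base}/{id}/actions/publish -- the step this commit comments out.
  }
}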
@@ -8,10 +8,10 @@
 			<name>targetPath</name>
 			<description>the target path</description>
 		</property>
-		<property>
-			<name>metadata</name>
-			<description>the metadata</description>
-		</property>
+<!--		<property>-->
+<!--			<name>metadata</name>-->
+<!--			<description>the metadata</description>-->
+<!--		</property>-->
 	</parameters>

 	<start to="send_zenodo"/>

@@ -34,20 +34,20 @@
 	</action>

-	<action name="send_zenodo">
-		<java>
-			<main-class>eu.dnetlib.dhp.export.zenodo.SendToZenodoHDFS</main-class>
-			<arg>--hdfsPath</arg><arg>/user/dnet.scholexplorer/scholix/provision/scholix.tar/scholix-2020-10-16.tar</arg>
-			<arg>--nameNode</arg><arg>${nameNode}</arg>
-			<arg>--accessToken</arg><arg>b6ddrY6b77WxcDEevn9gqVE5sL5sDNjdUijt75W3o7cQo5vpFFI48dMiu8Gv</arg>
-			<arg>--connectionUrl</arg><arg>https://zenodo.org/api/deposit/depositions</arg>
-			<arg>--metadata</arg><arg>${metadata}</arg>
-			<arg>--conceptRecordId</arg><arg>1200252</arg>
-			<arg>--newDeposition</arg><arg>false</arg>
-		</java>
-		<ok to="End"/>
-		<error to="Kill"/>
-	</action>
+<!--	<action name="send_zenodo">-->
+<!--		<java>-->
+<!--			<main-class>eu.dnetlib.dhp.export.zenodo.SendToZenodoHDFS</main-class>-->
+<!--			<arg>--hdfsPath</arg><arg>/user/dnet.scholexplorer/scholix/provision/scholix.tar/scholix-2020-10-16.tar</arg>-->
+<!--			<arg>--nameNode</arg><arg>${nameNode}</arg>-->
+<!--			<arg>--accessToken</arg><arg>b6ddrY6b77WxcDEevn9gqVE5sL5sDNjdUijt75W3o7cQo5vpFFI48dMiu8Gv</arg>-->
+<!--			<arg>--connectionUrl</arg><arg>https://zenodo.org/api/deposit/depositions</arg>-->
+<!--			<arg>--metadata</arg><arg>${metadata}</arg>-->
+<!--			<arg>--conceptRecordId</arg><arg>1200252</arg>-->
+<!--			<arg>--newDeposition</arg><arg>false</arg>-->
+<!--		</java>-->
+<!--		<ok to="End"/>-->
+<!--		<error to="Kill"/>-->
+<!--	</action>-->

 	<end name="End"/>
 </workflow-app>