Changed the logic to allow creating a dump for a single community at a time

Miriam Baglioni 2021-04-13 16:32:19 +02:00
parent 6179deb836
commit 8c4c74a640
4 changed files with 93 additions and 14 deletions
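
In broad terms, the change restricts the community map to the single requested community before the dump and split steps run, so every downstream stage only sees that one entry. A minimal sketch of that idea, assuming CommunityMap behaves like a Map<String, String> from community id to community name (the class and method names below are illustrative, not the project's API):

// Illustrative sketch only: keep a single community in the map before dumping.
// Assumes the community map is a Map<String, String> (community id -> community name).
import java.util.HashMap;
import java.util.Map;

public class SingleCommunityMapSketch {

	public static Map<String, String> keepOnly(Map<String, String> communityMap, String communityId) {
		Map<String, String> single = new HashMap<>();
		if (communityMap.containsKey(communityId)) {
			single.put(communityId, communityMap.get(communityId));
		}
		return single;
	}

	public static void main(String[] args) {
		Map<String, String> map = new HashMap<>();
		map.put("comm-a", "Community A");
		map.put("comm-b", "Community B");
		// Only "comm-a" survives; the dump and split steps would then process just that community.
		System.out.println(keepOnly(map, "comm-a"));
	}
}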

DumpProducts.java

@@ -37,7 +37,8 @@ public class DumpProducts implements Serializable {
isSparkSessionManaged,
spark -> {
Utils.removeOutputDir(spark, outputPath);
execDump(spark, inputPath, outputPath, communityMapPath, inputClazz, outputClazz, dumpType);
execDump(
spark, inputPath, outputPath, communityMapPath, inputClazz, outputClazz, dumpType);
});
}

SendToZenodoHDFS.java

@@ -1,6 +1,7 @@
package eu.dnetlib.dhp.oa.graph.dump;
import java.io.IOException;
import java.io.Serializable;
import java.util.Optional;
@@ -9,6 +10,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.jetbrains.annotations.NotNull;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.api.MissingConceptDoiException;
@@ -48,15 +50,12 @@ public class SendToZenodoHDFS implements Serializable {
.orElse(false);
final String depositionId = Optional.ofNullable(parser.get("depositionId")).orElse(null);
final String communityMapPath = parser.get("communityMapPath");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", hdfsNameNode);
FileSystem fileSystem = FileSystem.get(conf);
CommunityMap communityMap = Utils.readCommunityMap(fileSystem, communityMapPath);
RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem
.listFiles(
new Path(hdfsPath), true);
@@ -87,11 +86,6 @@
if (!p_string.endsWith("_SUCCESS")) {
// String tmp = p_string.substring(0, p_string.lastIndexOf("/"));
String name = p_string.substring(p_string.lastIndexOf("/") + 1);
log.info("Sending information for community: " + name);
if (communityMap.containsKey(name.substring(0, name.lastIndexOf(".")))) {
name = communityMap.get(name.substring(0, name.lastIndexOf("."))).replace(" ", "_") + ".tar";
}
FSDataInputStream inputStream = fileSystem.open(p);
zenodoApiClient.uploadIS(inputStream, name, fileStatus.getLen());

CommunitySplit.java

@@ -34,12 +34,14 @@ public class CommunitySplit implements Serializable {
isSparkSessionManaged,
spark -> {
Utils.removeOutputDir(spark, outputPath);
execSplit(spark, inputPath, outputPath, Utils.getCommunityMap(spark, communityMapPath).keySet());
CommunityMap communityMap = Utils.getCommunityMap(spark, communityMapPath);
execSplit(spark, inputPath, outputPath, communityMap);
});
}
private static void execSplit(SparkSession spark, String inputPath, String outputPath,
Set<String> communities) {
CommunityMap communities) {
Dataset<CommunityResult> result = Utils
.readPath(spark, inputPath + "/publication", CommunityResult.class)
@@ -48,8 +50,9 @@ public class CommunitySplit implements Serializable {
.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));
communities
.keySet()
.stream()
.forEach(c -> printResult(c, result, outputPath));
.forEach(c -> printResult(c, result, outputPath + "/" + communities.get(c).replace(" ", "_")));
}
@@ -61,7 +64,7 @@ public class CommunitySplit implements Serializable {
.write()
.option("compression", "gzip")
.mode(SaveMode.Overwrite)
.json(outputPath + "/" + c);
.json(outputPath);
}
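
With this change the split step names each community's output folder after the community's display name (spaces replaced by underscores) rather than its identifier, and printResult writes directly to the path it receives. A hypothetical example of the resulting layout, with made-up ids and names:

// Hypothetical ids and names, for illustration only.
import java.util.Collections;
import java.util.Map;

public class SplitOutputPathSketch {
	public static void main(String[] args) {
		String outputPath = "/tmp/dump/split";
		Map<String, String> communityMap = Collections.singletonMap("comm-a", "Community A");
		// The split now derives the folder from the community name, not the id.
		String target = outputPath + "/" + communityMap.get("comm-a").replace(" ", "_");
		System.out.println(target); // /tmp/dump/split/Community_A
	}
}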

RemoveCommunities.java

@@ -1,4 +1,85 @@
package eu.dnetlib.dhp.oa.graph.dump.community;
public class RemoveCommunities {
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
public class RemoveCommunities implements Serializable {
private static final Logger log = LoggerFactory.getLogger(RemoveCommunities.class);
private final static ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private final Configuration conf;
private final BufferedWriter writer;
private final CommunityMap communityMap;
public RemoveCommunities(String path, String hdfsNameNode) throws IOException {
conf = new Configuration();
conf.set("fs.defaultFS", hdfsNameNode);
FileSystem fileSystem = FileSystem.get(conf);
Path hdfsPath = new Path(path);
// FSDataInputStream p = fileSystem.open(hdfsPath);
// ObjectMapper mapper = new ObjectMapper();
communityMap = OBJECT_MAPPER.readValue((InputStream) fileSystem.open(hdfsPath), CommunityMap.class);
FSDataOutputStream fsDataOutputStream = null;
if (fileSystem.exists(hdfsPath)) {
fileSystem.delete(hdfsPath);
}
fsDataOutputStream = fileSystem.create(hdfsPath);
writer = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
}
public static void main(String[] args) throws Exception {
String jsonConfiguration = IOUtils
.toString(
RemoveCommunities.class
.getResourceAsStream(
"/eu/dnetlib/dhp/oa/graph/dump/input_rc_parameters.json"));
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
parser.parseArgument(args);
final String nameNode = parser.get("nameNode");
log.info("nameNode: {}", nameNode);
final String outputPath = parser.get("path");
log.info("outputPath: {}", outputPath);
final String communityId = parser.get("communityId");
final RemoveCommunities scm = new RemoveCommunities(outputPath, nameNode);
scm.removeCommunities(communityId);
}
private void removeCommunities(String communityId) throws IOException {
Set<String> toRemove = communityMap.keySet().stream().map(key -> {
if (key.equals(communityId))
return null;
return key;
}).filter(Objects::nonNull).collect(Collectors.toSet());
toRemove.forEach(key -> communityMap.remove(key));
writer.write(OBJECT_MAPPER.writeValueAsString(communityMap));
writer.close();
}
}
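
The removeCommunities method keeps only the entry whose key matches communityId and rewrites the map file with that single entry. A compact, equivalent way to express the same filtering (a sketch, not part of the commit):

// Equivalent to removeCommunities(communityId): keep only the requested community.
// communityMap is assumed to behave like a java.util.Map<String, String>.
communityMap.keySet().removeIf(key -> !key.equals(communityId));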