Publishing externalized to libraries in classpath
parent
961676484f
commit
8030b049d0
@ -1,116 +0,0 @@
|
||||
package org.gcube.smartgears.handlers;
|
||||
|
||||
import static org.gcube.smartgears.utils.Utils.notEmpty;
|
||||
import static org.gcube.smartgears.utils.Utils.rethrowUnchecked;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Set;
|
||||
|
||||
import org.gcube.common.resources.gcore.Resource;
|
||||
import org.gcube.informationsystem.publisher.ScopedPublisher;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public abstract class AbstractProfilePublisher<P extends Resource> implements ProfilePublisher {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(AbstractProfilePublisher.class);
|
||||
|
||||
//the underlying IS publisher
|
||||
private final ScopedPublisher publisher;
|
||||
|
||||
//private AuthorizationProvider authProvider ;
|
||||
|
||||
public AbstractProfilePublisher() {
|
||||
this.publisher=ProviderFactory.provider().publisher();
|
||||
//this.authProvider = ProviderFactory.provider().authorizationProvider();
|
||||
}
|
||||
|
||||
protected abstract P getProfile();
|
||||
protected abstract boolean isRoot();
|
||||
protected abstract void sharePublished(P profile);
|
||||
protected abstract Set<String> getAllowedContexts();
|
||||
|
||||
/**
|
||||
* Removes the application from one or more scopes.
|
||||
* @param scopes the scopes
|
||||
*/
|
||||
public void removeFrom(Collection<String> contexts) {
|
||||
P profile = getProfile();
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.remove(profile, new ArrayList<String>(contexts));
|
||||
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
log.debug("after remove application profile contains scopes {}",profile.scopes().asCollection());
|
||||
sharePublished(profile);
|
||||
}
|
||||
|
||||
|
||||
public void addToAll(){
|
||||
this.addTo(getAllowedContexts());
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds for the first time the current resource profile of the application in one or more scopes.
|
||||
* @param contexts the contexts
|
||||
*/
|
||||
public void addTo(Collection<String> contexts) {
|
||||
notEmpty("contexts",contexts);
|
||||
|
||||
P profile = getProfile();
|
||||
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
if (isRoot()) Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.create(profile, new ArrayList<String>(contexts));
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot()) Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
|
||||
sharePublished(profile);
|
||||
log.debug("shared profile with scopes {}", profile.scopes().asCollection());
|
||||
}
|
||||
|
||||
|
||||
|
||||
public void update() {
|
||||
P profile = getProfile();
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.update(profile);
|
||||
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
|
||||
sharePublished(profile);
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -0,0 +1,208 @@
|
||||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.gcube.common.events.Observes.Kind.critical;
|
||||
import static org.gcube.common.events.Observes.Kind.resilient;
|
||||
import static org.gcube.smartgears.Constants.profile_management;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.addToContext;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.changed;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.removeFromContext;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.failure;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.part_activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.shutdown;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.stop;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerState.active;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
|
||||
import org.gcube.common.events.Observes;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.container.ContainerHandler;
|
||||
import org.gcube.smartgears.handlers.container.ContainerLifecycleEvent;
|
||||
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.gcube.smartgears.publishing.Publisher;
|
||||
import org.gcube.smartgears.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
* Manages the resource profile of the application.
|
||||
* <p>
|
||||
*
|
||||
* The manager:
|
||||
*
|
||||
* <ul>
|
||||
* <li>creates the profile when the application starts for the first time;
|
||||
* <li>loads the profile when the application restarts;
|
||||
* <li>publishes the profile when the application becomes active, and at any
|
||||
* lifecycle change thereafter;
|
||||
* <li>stores the profile locally after each publication;
|
||||
* </ul>
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
* @see ProfileBuilder
|
||||
* @see ProfilePublisherImpl
|
||||
*/
|
||||
public class ContainerProfileManager extends ContainerHandler {
|
||||
|
||||
Logger log = LoggerFactory.getLogger(ContainerProfileManager.class);
|
||||
|
||||
private ContainerContext context;
|
||||
private ScheduledFuture<?> periodicUpdates;
|
||||
|
||||
private List<Publisher> publishers;
|
||||
|
||||
|
||||
@Override
|
||||
public void onStart(ContainerLifecycleEvent.Start e) {
|
||||
|
||||
context = e.context();
|
||||
|
||||
activated();
|
||||
|
||||
schedulePeriodicUpdates();
|
||||
// note we don't fire profile events, but wait for the final startup
|
||||
// outcome which
|
||||
// will result in a state change. only then we publish and store the
|
||||
// profile
|
||||
// this avoids the redundancy and performance penalty of storing and
|
||||
// publishing multiple
|
||||
// times in rapid succession (which would be correct). Revise if proves
|
||||
// problematic in corner
|
||||
// cases.
|
||||
|
||||
}
|
||||
|
||||
|
||||
private void activated(){
|
||||
publishers = context.configuration().mode()!=Mode.offline?
|
||||
ProviderFactory.provider().publishers():
|
||||
Collections.emptyList();
|
||||
registerObservers();
|
||||
schedulePeriodicUpdates();
|
||||
}
|
||||
|
||||
private void registerObservers() {
|
||||
context.events().subscribe(new Object() {
|
||||
@Observes({ activation, part_activation, shutdown, stop, failure })
|
||||
void onChanged(ContainerLifecycle lc) {
|
||||
|
||||
// since we do not know the observers, they will deal with failures and their consequences
|
||||
// any that comes back will be logged in this event thread
|
||||
context.events().fire(context, changed);
|
||||
}
|
||||
|
||||
@Observes(value = changed, kind = critical)
|
||||
void publishAfterChange(ContainerContext context) {
|
||||
log.info("Publish after profile Change event called");
|
||||
//if we've failed before first publication do not try to publish
|
||||
//(we may well have failed there)
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
publisher.publishContainer(context,
|
||||
context.configuration().authorizationProvider().getContexts());
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot publish containar with publisher type {} (see details)", publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
}
|
||||
|
||||
@Observes(value = addToContext)
|
||||
void addTo(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.trace("publishing container within new scope");
|
||||
publisher.publishContainer(context,
|
||||
Collections.singleton(scope));
|
||||
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot add container to {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = removeFromContext)
|
||||
void removeFrom(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.trace("unpublishing container from context {}", scope);
|
||||
publisher.unpublishContainer(context,
|
||||
Collections.singleton(scope));
|
||||
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot add container to {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
private void schedulePeriodicUpdates() {
|
||||
// register to cancel updates
|
||||
context.events().subscribe(
|
||||
new Object() {
|
||||
// we register it in response to lifecycle events so that we can stop and resume along with application
|
||||
@Observes(value = { activation, part_activation }, kind = resilient)
|
||||
synchronized void restartPeriodicUpdates(ContainerLifecycle lc) {
|
||||
//already running
|
||||
if (periodicUpdates!=null)
|
||||
return;
|
||||
if (lc.state()==active)
|
||||
log.info("scheduling periodic updates of container profile");
|
||||
else
|
||||
log.info("resuming periodic updates of container profile");
|
||||
final Runnable updateTask = new Runnable() {
|
||||
public void run() {
|
||||
context.events().fire(context,changed);
|
||||
}
|
||||
};
|
||||
periodicUpdates = Utils.scheduledServicePool.scheduleAtFixedRate(updateTask, 3, context.configuration()
|
||||
.publicationFrequency(), SECONDS);
|
||||
}
|
||||
@Observes(value = { stop, failure, shutdown }, kind = resilient)
|
||||
synchronized void cancelPeriodicUpdates(ContainerLifecycle ignore) {
|
||||
if (periodicUpdates != null){
|
||||
log.trace("stopping periodic updates of container profile");
|
||||
try {
|
||||
periodicUpdates.cancel(true);
|
||||
periodicUpdates=null;
|
||||
}
|
||||
catch(Exception e) {
|
||||
log.warn("could not stop periodic updates of container profile",e);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@Override
|
||||
public String toString() {
|
||||
return profile_management;
|
||||
}
|
||||
}
|
||||
|
@ -1,54 +0,0 @@
|
||||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.AbstractProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.ProfileEvents;
|
||||
|
||||
/**
|
||||
* Publishes the resource profile of the container.
|
||||
* <p>
|
||||
* Distinguishes publication in new contexts ({@link #addTo(java.util.Collection)}) from publication updates in existing contexts ({@link #update()}).
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
*
|
||||
*/
|
||||
public class ContainerPublisher extends AbstractProfilePublisher<HostingNode> {
|
||||
|
||||
|
||||
private final ContainerContext context;
|
||||
|
||||
public ContainerPublisher(ContainerContext context) {
|
||||
super();
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
|
||||
protected void sharePublished(HostingNode profile) {
|
||||
context.events().fire(profile,ProfileEvents.published);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected HostingNode getProfile() {
|
||||
return context.profile();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isRoot() {
|
||||
return context.configuration().mode()!=Mode.root;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected Set<String> getAllowedContexts() {
|
||||
return context.configuration().allowedContexts();
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -1,93 +0,0 @@
|
||||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* @author Luca Frosini (ISTI-CNR)
|
||||
*/
|
||||
public class LinuxDistributionInfo {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(LinuxDistributionInfo.class);
|
||||
|
||||
public static final String LSB_RELEASE_COMMAND = "lsb_release -a";
|
||||
public static final String OS_RELEASE_FILE_PATH = "/etc/os-release";
|
||||
|
||||
protected Map<String, String> info;
|
||||
|
||||
protected Map<String, String> getInfoViaLsbReleaseCommand() throws IOException {
|
||||
logger.trace("Going to exec {}", LSB_RELEASE_COMMAND);
|
||||
Process process = Runtime.getRuntime().exec(LSB_RELEASE_COMMAND);
|
||||
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
|
||||
Map<String, String> map = parseBufferedReader(bufferedReader);
|
||||
bufferedReader.close();
|
||||
return map;
|
||||
}
|
||||
|
||||
private Map<String, String> parseBufferedReader(BufferedReader bufferedReader) throws IOException {
|
||||
Map<String, String> map = new HashMap<>();
|
||||
String line = "";
|
||||
while ((line = bufferedReader.readLine()) != null) {
|
||||
String[] nameValue = parseLine(line);
|
||||
map.put(nameValue[0], nameValue[1]);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
private String[] parseLine(String line) {
|
||||
String[] splitted = line.split("=");
|
||||
if (splitted.length < 2) {
|
||||
splitted = line.split(":");
|
||||
}
|
||||
String[] ret = new String[2];
|
||||
ret[0] = splitted[0].trim();
|
||||
ret[1] = splitted[1].trim().replace("\"", "");
|
||||
return ret;
|
||||
}
|
||||
|
||||
private Map<String, String> getInfoViaFile(File file) throws IOException {
|
||||
logger.trace("Going to read file {}", file.getAbsolutePath());
|
||||
BufferedReader bufferedReader = new BufferedReader(new FileReader(file));
|
||||
Map<String, String> map = parseBufferedReader(bufferedReader);
|
||||
bufferedReader.close();
|
||||
return map;
|
||||
|
||||
}
|
||||
|
||||
protected Map<String, String> getInfoViaOsReleaseFile() throws IOException {
|
||||
File osReleaseFile = new File(OS_RELEASE_FILE_PATH);
|
||||
return getInfoViaFile(osReleaseFile);
|
||||
}
|
||||
|
||||
private Map<String, String> retriveInfo() {
|
||||
try {
|
||||
return getInfoViaLsbReleaseCommand();
|
||||
} catch (IOException e) {
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
return getInfoViaOsReleaseFile();
|
||||
}catch (IOException e) {
|
||||
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
public Map<String, String> getInfo() {
|
||||
if (info == null) {
|
||||
info = retriveInfo();
|
||||
}
|
||||
return info;
|
||||
}
|
||||
|
||||
}
|
@ -1,375 +0,0 @@
|
||||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.Reader;
|
||||
import java.math.BigDecimal;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.GHNType;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.Processor;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.Variable;
|
||||
import org.gcube.common.resources.gcore.utils.Group;
|
||||
import org.gcube.smartgears.configuration.container.ContainerConfiguration;
|
||||
import org.gcube.smartgears.configuration.library.SmartGearsConfiguration;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* @author Fabio Simeoni
|
||||
* @author Luca Frosini (ISTI - CNR)
|
||||
*
|
||||
*/
|
||||
public class ProfileBuilder {
|
||||
|
||||
private static Logger log = LoggerFactory.getLogger(ProfileBuilder.class);
|
||||
|
||||
private ContainerContext context;
|
||||
|
||||
public ProfileBuilder(ContainerContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
public HostingNode create() {
|
||||
|
||||
HostingNode node = new HostingNode();
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
node.newProfile().infrastructure(cfg.infrastructure());
|
||||
|
||||
addSiteTo(node);
|
||||
|
||||
String ip = "not resolved";
|
||||
try {
|
||||
ip = InetAddress.getLocalHost().getHostAddress();
|
||||
} catch (UnknownHostException e) {
|
||||
log.warn("unable to detect the IP address of the host");
|
||||
}
|
||||
|
||||
node.profile().newDescription().activationTime(Calendar.getInstance()).name(cfg.hostname() + ":" + cfg.port());
|
||||
|
||||
node.profile().description().networkAdapters().add().mtu(0).name("local-adapter").ipAddress(ip).inboundIP("")
|
||||
.outboundIP("");
|
||||
|
||||
node.profile().description().newOperatingSystem().name(System.getProperty("os.name"))
|
||||
.version(System.getProperty("os.version")).release("");
|
||||
|
||||
node.profile().description().newArchitecture().platformType(System.getProperty("os.arch")).smpSize(0)
|
||||
.smtSize(0);
|
||||
|
||||
node.profile().newSite().domain("It").country("It").location("Rome").latitude("1").longitude("1");
|
||||
|
||||
ArrayList<HashMap<String, String>> info = cpuInfo();
|
||||
|
||||
Group<Processor> processors = node.profile().description().processors();
|
||||
|
||||
for (HashMap<String, String> map : info)
|
||||
|
||||
processors.add().bogomips(new BigDecimal(map.get("bogomips")))
|
||||
.clockSpeedMhz(new BigDecimal(map.get("cpu_MHz"))).family(map.get("cpu_family"))
|
||||
.modelName(map.get("model_name")).model(map.get("model")).vendor(map.get("vendor_id"))
|
||||
.cacheL1(new Integer(map.get("cache_size"))).cacheL1D(0).cacheL1I(0).cacheL2(0);
|
||||
|
||||
addVariablesTo(node);
|
||||
|
||||
update(node,false);
|
||||
|
||||
node.profile().description().type(GHNType.Static);
|
||||
// String type = (String) context.getProperty(GHNContext.GHN_TYPE, false);
|
||||
// if (type.compareToIgnoreCase(Type.DYNAMIC.toString()) == 0) description.setType(Description.Type.Dynamic);
|
||||
// else if (type.compareToIgnoreCase(Type.STATIC.toString()) == 0) description.setType(Description.Type.Static);
|
||||
// else if (type.compareToIgnoreCase(Type.SELFCLEANING.toString()) == 0)
|
||||
// description.setType(Description.Type.Selfcleaning);
|
||||
//
|
||||
// file system
|
||||
node.profile().description().localFileSystems().add().name("").type("").readOnly(false)
|
||||
.root("/");
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
@SuppressWarnings("all")
|
||||
private ArrayList<HashMap<String, String>> cpuInfo() {
|
||||
|
||||
ArrayList<HashMap<String, String>> map = new ArrayList<HashMap<String, String>>();
|
||||
|
||||
File file = new File("/proc/cpuinfo");
|
||||
|
||||
if (!file.exists()) {
|
||||
log.warn("cannot acquire CPU info (no /proc/cpuinfo)");
|
||||
return map;
|
||||
}
|
||||
|
||||
BufferedReader input = null;
|
||||
|
||||
try {
|
||||
input = new BufferedReader(new FileReader(file));
|
||||
|
||||
String line = null;
|
||||
|
||||
HashMap<String, String> currentProcessor = null;
|
||||
|
||||
while ((line = input.readLine()) != null) {
|
||||
|
||||
if ((line.startsWith("processor"))) { // add the current processor to the map
|
||||
|
||||
if (currentProcessor != null)
|
||||
map.add((HashMap) currentProcessor.clone());
|
||||
|
||||
currentProcessor = new HashMap<String, String>();
|
||||
}
|
||||
|
||||
try {
|
||||
if (line.contains("vendor_id"))
|
||||
currentProcessor.put("vendor_id", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cpu family"))
|
||||
currentProcessor.put("cpu_family", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if ((line.contains("model\t")) || (line.contains("model\b")))
|
||||
currentProcessor.put("model", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("model name"))
|
||||
currentProcessor.put("model_name", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cpu MHz"))
|
||||
currentProcessor.put("cpu_MHz", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cache size"))
|
||||
currentProcessor.put("cache_size", line.split(":")[1].trim().split(" ")[0]);
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("bogomips"))
|
||||
currentProcessor.put("bogomips", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
}
|
||||
|
||||
if (currentProcessor != null)
|
||||
map.add(currentProcessor);
|
||||
|
||||
} catch (Exception e) {
|
||||
|
||||
log.warn("unable to acquire CPU info", e);
|
||||
|
||||
} finally {
|
||||
|
||||
if (input != null)
|
||||
try {
|
||||
input.close();
|
||||
} catch (IOException e) {
|
||||
log.warn("unable to close stream", e);
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
private long getFreeSpace() {
|
||||
long free = 0;
|
||||
try {
|
||||
free = context.configuration().persistence().getFreeSpace()/1024;
|
||||
} catch (Exception ioe) {
|
||||
log.warn("unable to detect the free space on the disk", ioe);
|
||||
}
|
||||
return free;
|
||||
}
|
||||
|
||||
public void update(HostingNode node,boolean onLoad) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
if (onLoad) {
|
||||
|
||||
log.info("updating ghn profile");
|
||||
|
||||
node.profile().description().activationTime(Calendar.getInstance()).name(cfg.hostname() + ":" + cfg.port());
|
||||
|
||||
addVariablesTo(node);
|
||||
|
||||
addSiteTo(node);
|
||||
|
||||
}
|
||||
|
||||
node.profile().description().status(context.lifecycle().state().remoteForm());
|
||||
|
||||
Map<String, Long> mem = memoryUsage();
|
||||
|
||||
node.profile().description().newMainMemory().ramAvailable(mem.get("MemoryAvailable"))
|
||||
.ramSize(mem.get("MemoryTotalSize")).virtualAvailable(mem.get("VirtualAvailable"))
|
||||
.virtualSize(mem.get("VirtualSize"));
|
||||
|
||||
node.profile().description().localAvailableSpace(getFreeSpace());
|
||||
|
||||
node.profile().description().uptime(uptime());
|
||||
|
||||
node.profile().description().lastUpdate(Calendar.getInstance());
|
||||
|
||||
Map<String, Double> loads = loadStatistics();
|
||||
|
||||
node.profile().description().newLoad().lastMin(loads.get("1min") == null ? 0 : loads.get("1min"))
|
||||
.last5Mins(loads.get("5mins") == null ? 0 : loads.get("5mins"))
|
||||
.last15Mins(loads.get("15mins") == null ? 0 : loads.get("15mins"));
|
||||
|
||||
}
|
||||
|
||||
private void addSiteTo(HostingNode node) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
node.profile().newSite().country(cfg.site().getCountry()).location(cfg.site().getLocation())
|
||||
.latitude(cfg.site().getLatitude()).longitude(cfg.site().getLongitude()).domain(domainIn(cfg.hostname()));
|
||||
}
|
||||
|
||||
private void addVariablesTo(HostingNode node) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
Group<Variable> variables = node.profile().description().environmentVariables();
|
||||
|
||||
// Cleaning variables to avoid duplicates
|
||||
variables.removeAll(node.profile().description().environmentVariables());
|
||||
|
||||
Map<String, String> map = new HashMap<String, String>();
|
||||
map.putAll(cfg.properties());
|
||||
map.putAll(System.getenv());
|
||||
|
||||
for (Map.Entry<String, String> entry : map.entrySet()) {
|
||||
String varname = entry.getKey();
|
||||
if ((varname.compareToIgnoreCase("CLASSPATH") == 0) || (varname.compareToIgnoreCase("PATH") == 0)
|
||||
|| (varname.contains("SSH")) || (varname.contains("MAIL"))
|
||||
|| (varname.compareToIgnoreCase("LS_COLORS") == 0))
|
||||
continue;
|
||||
variables.add().keyAndValue(entry.getKey(), entry.getValue());
|
||||
}
|
||||
|
||||
/* The following code is useless can be removed
|
||||
Map<String, String> envvars = new HashMap<String, String>();
|
||||
for (String varname : envvars.keySet()) {
|
||||
|
||||
// a bit of filtering
|
||||
if ((varname.compareToIgnoreCase("CLASSPATH") == 0) || (varname.compareToIgnoreCase("PATH") == 0)
|
||||
|| (varname.contains("SSH")) || (varname.contains("MAIL"))
|
||||
|| (varname.compareToIgnoreCase("LS_COLORS") == 0))
|
||||
continue;
|
||||
|
||||
variables.add().keyAndValue(varname, envvars.get(varname));
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
String osVersion = System.getProperty("os.name");
|
||||
if(osVersion.compareToIgnoreCase("Linux")==0) {
|
||||
LinuxDistributionInfo linuxDistributionInfo = new LinuxDistributionInfo();
|
||||
Map<String,String> info = linuxDistributionInfo.getInfo();
|
||||
for(String key : info.keySet()) {
|
||||
variables.add().keyAndValue(key, info.get(key));
|
||||
}
|
||||
}
|
||||
|
||||
variables.add().keyAndValue("Java", System.getProperty("java.version"));
|
||||
|
||||
SmartGearsConfiguration config = ProviderFactory.provider().smartgearsConfiguration();
|
||||
variables.add().keyAndValue("SmartGears",config.version());
|
||||
|
||||
variables.add().keyAndValue("ghn-update-interval-in-secs", String.valueOf(cfg.publicationFrequency()));
|
||||
|
||||
}
|
||||
|
||||
public String uptime() {
|
||||
String lines = "", linetemp = null;
|
||||
try {
|
||||
Process p = Runtime.getRuntime().exec("uptime");
|
||||
p.waitFor();
|
||||
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||
while ((linetemp = input.readLine()) != null)
|
||||
lines += linetemp;
|
||||
input.close();
|
||||
p.destroy();
|
||||
lines = lines.split(",")[0].split("up")[1].trim();
|
||||
} catch (Exception e) {
|
||||
log.warn("unable to detect the uptime of this machine", e);
|
||||
lines = "unable to detect";
|
||||
}
|
||||
return lines;
|
||||
}
|
||||
|
||||
public Map<String, Double> loadStatistics() {
|
||||
|
||||
Map<String, Double> result = new HashMap<String, Double>();
|
||||
try {
|
||||
File loadadv = new File("/proc/loadavg");
|
||||
if (loadadv.exists()) {
|
||||
Reader reader = new FileReader(loadadv);
|
||||
int c;
|
||||
StringBuilder content = new StringBuilder();
|
||||
while ((c = reader.read()) != -1)
|
||||
content.append((char) c);
|
||||
reader.close();
|
||||
Pattern p = Pattern.compile("^(.*?)\\s{1}(.*?)\\s{1}(.*?)\\s{1}(.*)$");
|
||||
Matcher matcher = p.matcher(content.toString());
|
||||
if ((matcher.matches()) && (matcher.groupCount() > 3)) {
|
||||
result.put("1min", new Double(matcher.group(1)));
|
||||
result.put("5mins", new Double(matcher.group(2)));
|
||||
result.put("15mins", new Double(matcher.group(3).split("\\s")[0]));
|
||||
}
|
||||
}
|
||||
} catch (Exception ioe) {
|
||||
log.warn("unable to detect the load values of this machine", ioe);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@SuppressWarnings("all")
|
||||
public Map<String, Long> memoryUsage() {
|
||||
Map<String, Long> map = new HashMap<String, Long>();
|
||||
java.lang.management.OperatingSystemMXBean mxbean = java.lang.management.ManagementFactory
|
||||
.getOperatingSystemMXBean();
|
||||
com.sun.management.OperatingSystemMXBean sunmxbean = (com.sun.management.OperatingSystemMXBean) mxbean;
|
||||
long freeMemory = sunmxbean.getFreePhysicalMemorySize() / 1048576; // in MB
|
||||
long availableMemory = sunmxbean.getTotalPhysicalMemorySize() / 1048576; // in MB
|
||||
map.put("MemoryAvailable", freeMemory);
|
||||
map.put("MemoryTotalSize", availableMemory);
|
||||
long ramVirtualAvailable = Runtime.getRuntime().freeMemory() / 1048576; // in MB
|
||||
long ramVirtualSize = Runtime.getRuntime().totalMemory() / 1048576; // in MB
|
||||
map.put("VirtualAvailable", ramVirtualAvailable);
|
||||
map.put("VirtualSize", ramVirtualSize);
|
||||
return map;
|
||||
}
|
||||
|
||||
private String domainIn(String hostname) {
|
||||
Pattern pattern = Pattern.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})");
|
||||
java.util.regex.Matcher regexMatcher = pattern.matcher(hostname);
|
||||
if (regexMatcher.matches()) //it's an IP address, nothing to trim
|
||||
return hostname;
|
||||
String[] tokens = hostname.split("\\.");
|
||||
if (tokens.length < 2)
|
||||
return hostname;
|
||||
else
|
||||
return tokens[tokens.length-2]+ "." + tokens[tokens.length-1];
|
||||
}
|
||||
}
|
@ -1,291 +0,0 @@
|
||||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.gcube.common.events.Observes.Kind.critical;
|
||||
import static org.gcube.common.events.Observes.Kind.resilient;
|
||||
import static org.gcube.smartgears.Constants.container_profile_property;
|
||||
import static org.gcube.smartgears.Constants.profile_management;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.addToContext;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.changed;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.published;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.removeFromContext;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.failure;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.part_activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.shutdown;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.stop;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerState.active;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
|
||||
import org.gcube.common.events.Observes;
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.Property;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.OfflineProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.ProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.container.ContainerHandler;
|
||||
import org.gcube.smartgears.handlers.container.ContainerLifecycleEvent.Start;
|
||||
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
|
||||
import org.gcube.smartgears.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
* Manages the resource profile of the container.
|
||||
* <p>
|
||||
*
|
||||
* The manager:
|
||||
*
|
||||
* <ul>
|
||||
* <li>creates the profile when the container starts for the first time;
|
||||
* <li>loads the profile when the container restarts;
|
||||
* <li>publishes the profile when the container becomes active, and at any lifecycle change thereafter;
|
||||
* <li>stores the profile locally after each publication;
|
||||
* </ul>
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
* @see ProfileBuilder
|
||||
*/
|
||||
public class ProfileContainerManager extends ContainerHandler {

    private static Logger log = LoggerFactory.getLogger(ProfileContainerManager.class);

    // context of the container whose profile this handler manages
    private ContainerContext context;

    // builds and periodically refreshes the HostingNode profile
    private ProfileBuilder builder;
    // pushes the profile to the IS; a no-op stand-in is used in offline mode
    private ProfilePublisher publisher;

    // handle on the scheduled refresh task; null whenever updates are not running
    private ScheduledFuture<?> periodicUpdates;

    /**
     * Creates the container profile and wires up all lifecycle observers when
     * the container starts.
     *
     * @param e the start event carrying the container context
     */
    @Override
    public void onStart(Start e) {

        context = e.context();
        builder = new ProfileBuilder(context);

        activated();

        // note we don't fire profile events, but wait for the final startup response which
        // will result in a state change. only then we publish and store the profile
        // this avoids the redundancy and performance penalty of storing and publishing multiple
        // times in rapid succession (which would be correct). Revise if proves problematic in corner
        // cases.

    }

    // Creates and shares the profile, selects the publisher (real or offline),
    // then registers the lifecycle observers and the periodic-update scheduler.
    private void activated(){
        HostingNode profile = loadOrCreateProfile();

        share(profile);

        // offline containers get a publisher that never talks to the IS
        publisher = context.configuration().mode()!=Mode.offline?
                        new ContainerPublisher(context):
                        new OfflineProfilePublisher();

        registerObservers();

        schedulePeriodicUpdates();
    }

    // Subscribes an anonymous observer object to the container event bus; each
    // annotated method reacts to one kind of profile/lifecycle event.
    private void registerObservers() {

        context.events().subscribe(new Object() {

            // mirror every lifecycle transition into the profile's status field
            @Observes({ activation, part_activation, shutdown, stop, failure })
            void onChanged(ContainerLifecycle lc) {

                HostingNode profile = context.profile();

                profile.profile().description().status(lc.state().remoteForm());

                // since we do not know the observers, they will deal with failures and their consequences
                // any that comes back will be logged in this event thread
                context.events().fire(profile, changed);

            }

            // re-share after publication: publish may produce a new profile instance
            @Observes(value = published)
            void shareAfterPublish(HostingNode profile) {

                share(profile); // publish may produce a new profile instance

            }

            // critical observer: a changed profile must be (re)published
            @Observes(value = changed, kind = critical)
            void publishAfterChange(HostingNode profile) {

                log.info("Publish after profile Change event called");

                publish(profile); // if successful, triggers share and store.

            }

            // join a new context/scope identified by the given token
            @Observes(value = addToContext)
            void addTo(String token) {
                try {
                    log.trace("publishing container with new token");
                    publisher.addTo(Collections.singleton(token));
                    publisher.update();
                }catch (Exception e) {

                    log.error("cannot add token {} (see details)",token, e);

                    // since we've failed no published event is fired and profile
                    // will not be stored.
                    // we do it manually to ensure we leave some local trace of the
                    // changed profile.
                    //TODO: CHECK --- store(profile);
                }

            }

            // leave the context/scope identified by the given token
            @Observes(value = removeFromContext)
            void removeFrom(String token) {
                try {
                    log.trace("unpublishing container with new token");
                    publisher.removeFrom(Collections.singleton(token));
                    publisher.update();
                }catch (Exception e) {

                    log.error("cannot remove token {} (see details)",token, e);

                    // since we've failed no published event is fired and profile
                    // will not be stored.
                    // we do it manually to ensure we leave some local trace of the
                    // changed profile.
                    //TODO: CHECK --- store(profile);
                }

            }

        });
    }

    // NOTE(review): despite the name, no loading path exists here — the profile
    // is always created from scratch; confirm whether loading was removed on purpose.
    private HostingNode loadOrCreateProfile() {

        return createProfile();

    }

    // Exposes the profile to co-located components via a shared context property.
    private void share(HostingNode profile) {

        log.trace("sharing container profile");
        context.properties().add(new Property(container_profile_property, profile));
    }

    // Builds a fresh HostingNode profile; any failure here is fatal for startup.
    private HostingNode createProfile() {

        log.info("creating container profile");

        try {
            HostingNode node = builder.create();
            node.setId(context.id());
            return node;
        } catch (Throwable e) {

            // this is a critical startup failure: it will fail the application
            throw new RuntimeException("cannot create container profile", e);

        }

    }

    // Publishes the profile to the IS, choosing between first publication
    // (no scopes yet) and a routine update. Failures are logged, not rethrown.
    private void publish(HostingNode profile) {

        //ContainerConfiguration configuration = context.configuration();

        // first-publication vs. routine publication: when we delete scopes let's make sure there is
        // at least one left of it will be re-triggered
        boolean firstPublication = profile.scopes().isEmpty();

        try {

            if (firstPublication)
                publisher.addToAll();
            else
                publisher.update();

        } catch (Exception e) {

            log.error("cannot publish container (see details)", e);

            // since we've failed no published event is fired and profile will not be stored.
            // we do it manually to ensure we leave some local trace of the changed profile.
            //store(profile);

        }
    }

    // Registers a second observer pair that starts/resumes and cancels the
    // periodic profile refresh in step with the container lifecycle.
    private void schedulePeriodicUpdates() {

        // register to cancel updates
        context.events().subscribe(

        new Object() {

            // we register it in response to lifecycle events so that we can stop and resume along with application
            @Observes(value = { activation, part_activation }, kind = resilient)
            synchronized void restartPeriodicUpdates(ContainerLifecycle lc) {

                //already running
                if (periodicUpdates!=null)
                    return;

                if (lc.state()==active)
                    log.info("scheduling periodic updates of container profile");

                else
                    log.info("resuming periodic updates of container profile");

                final Runnable updateTask = new Runnable() {
                    public void run() {
                        HostingNode profile = context.profile();

                        try {
                            builder.update(profile, false);
                        }
                        catch(Exception e) {
                            //we may fail in the update of the profile
                            log.error("cannot complete periodic update of container profile",e);
                        }

                        //if handling of event generates failures these will be reported
                        //for resilience we do not fail the application
                        log.trace("firing change event on container profile");
                        context.events().fire(profile,changed);
                    }
                };

                // initial delay of 3s, then one refresh per configured publication period
                periodicUpdates = Utils.scheduledServicePool.scheduleAtFixedRate(updateTask, 3, context.configuration()
                        .publicationFrequency(), SECONDS);

            }

            @Observes(value = { stop, failure, shutdown }, kind = resilient)
            synchronized void cancelPeriodicUpdates(ContainerLifecycle ignore) {

                if (periodicUpdates != null){
                    log.trace("stopping periodic updates of container profile");

                    try {
                        periodicUpdates.cancel(true);
                        periodicUpdates=null;
                    }
                    catch(Exception e) {
                        log.warn("could not stop periodic updates of container profile",e);
                    }
                }
            }

        });

    }

    @Override
    public String toString() {
        return profile_management;
    }
}
|
@ -1,56 +0,0 @@
|
||||
package org.gcube.smartgears.provider;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
|
||||
import org.gcube.common.resources.gcore.Resource;
|
||||
import org.gcube.informationsystem.publisher.ScopedPublisher;
|
||||
import org.gcube.informationsystem.publisher.exception.RegistryNotFoundException;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
|
||||
/**
|
||||
* An implementation of {@link ScopedPublisher} that simulates remote publication.
|
||||
* <p>
|
||||
* Used for applications and or containers that operate in {@link Mode#offline}.
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
*
|
||||
*/
|
||||
public class OfflinePublisher implements ScopedPublisher {
|
||||
|
||||
@Override
|
||||
public <T extends Resource> T update(T resource){
|
||||
// do nothing
|
||||
return resource;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Resource> T create(T resource, List<String> scopes)
|
||||
throws RegistryNotFoundException {
|
||||
// fragile! bypass restrictions reflectively and set new scope
|
||||
for (String scope : scopes)
|
||||
try {
|
||||
Method m = resource.getClass().getSuperclass().getDeclaredMethod("addScope", String.class);
|
||||
m.setAccessible(true);
|
||||
m.invoke(resource, scope);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("could not simulate publication in scope " + scope, e);
|
||||
}
|
||||
return resource;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends Resource> T remove(T resource, List<String> scopes)
|
||||
throws RegistryNotFoundException {
|
||||
for (String scope : scopes)
|
||||
try {
|
||||
Method m = resource.getClass().getSuperclass().getDeclaredMethod("removeScope", String.class);
|
||||
m.setAccessible(true);
|
||||
m.invoke(resource, scope);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("could not simulate publication remove from scope " + scope, e);
|
||||
}
|
||||
return resource;
|
||||
}
|
||||
|
||||
}
|
@ -1,15 +1,17 @@
|
||||
package org.gcube.smartgears.publishing;
|
||||
|
||||
import org.gcube.smartgears.configuration.application.ApplicationConfiguration;
|
||||
import org.gcube.smartgears.configuration.container.ContainerConfiguration;
|
||||
import java.util.Set;
|
||||
|
||||
import org.gcube.smartgears.context.application.ApplicationContext;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
|
||||
/**
 * Contract for components that publish and unpublish container and application
 * profiles in one or more contexts.
 * <p>
 * NOTE(review): two parallel signature families appear here — one taking
 * {@code *Configuration} plus varargs contexts and one taking {@code *Context}
 * plus a {@code Set} of contexts. They look like the before/after of a
 * refactoring; confirm which family callers actually use.
 */
public interface Publisher {

    // publishes the container profile in the given contexts; true on success
    boolean publishContainer(ContainerConfiguration container, String ... contexts);
    boolean publishContainer(ContainerContext container, Set<String> contexts);

    // publishes the application profile in the given contexts; true on success
    boolean publishApplication(ApplicationConfiguration application, String ... contexts);
    boolean publishApplication(ApplicationContext application, Set<String> contexts);

    // removes the container profile from the given contexts; true on success
    boolean unpublishContainer(ContainerConfiguration container, String ... contexts);
    boolean unpublishContainer(ContainerContext container, Set<String> contexts);

    // removes the application profile from the given contexts; true on success
    boolean unpublishApplication(ApplicationConfiguration application, String ... contexts);
    boolean unpublishApplication(ApplicationContext application, Set<String> contexts);
}
|
@ -0,0 +1,5 @@
|
||||
package org.gcube.smartgears.publishing;
|
||||
|
||||
/**
 * Marks a class as a Smartgears profile publisher so that it can be discovered
 * on the classpath.
 * <p>
 * NOTE(review): without an explicit retention this annotation defaulted to
 * {@code RetentionPolicy.CLASS} and was therefore invisible to reflection at
 * runtime. Since publishers are externalized to libraries in the classpath and
 * presumably discovered reflectively, RUNTIME retention is declared here —
 * confirm against the discovery mechanism. The change is backward-compatible:
 * existing compile-time uses are unaffected.
 */
@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
public @interface SmartgearsProfilePublisher {

}
|
Loading…
Reference in New Issue