Publishing externalized to libraries in classpath
This commit is contained in:
parent
961676484f
commit
8030b049d0
6
pom.xml
6
pom.xml
|
@ -42,6 +42,12 @@
|
|||
|
||||
<dependencies>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.reflections</groupId>
|
||||
<artifactId>reflections</artifactId>
|
||||
<version>0.9.10</version>
|
||||
</dependency>
|
||||
|
||||
<!-- gCube Jackson -->
|
||||
<dependency>
|
||||
<groupId>org.gcube.common</groupId>
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package org.gcube.smartgears;
|
||||
|
||||
import org.gcube.smartgears.extensions.resource.RemoteResource;
|
||||
import org.gcube.smartgears.handlers.application.lifecycle.ProfileManager;
|
||||
import org.gcube.smartgears.handlers.application.request.RequestAccounting;
|
||||
import org.gcube.smartgears.handlers.application.request.RequestValidator;
|
||||
import org.gcube.smartgears.handlers.container.lifecycle.AccountingManager;
|
||||
|
||||
|
|
|
@ -115,10 +115,7 @@ public interface ApplicationConfiguration {
|
|||
*/
|
||||
ApplicationConfiguration persistence(PersistenceWriter manager);
|
||||
|
||||
void authorizedContexts(Set<String> authorizedContexts);
|
||||
|
||||
Set<String> authorizedContexts();
|
||||
|
||||
/**
|
||||
* Validates this configuration.
|
||||
*
|
||||
|
|
|
@ -17,7 +17,7 @@ import javax.xml.bind.JAXBException;
|
|||
import org.gcube.smartgears.extensions.ApplicationExtension;
|
||||
import org.gcube.smartgears.handlers.application.ApplicationLifecycleHandler;
|
||||
import org.gcube.smartgears.handlers.application.RequestHandler;
|
||||
import org.gcube.smartgears.handlers.application.lifecycle.ProfileManager;
|
||||
import org.gcube.smartgears.handlers.application.lifecycle.ApplicationProfileManager;
|
||||
import org.gcube.smartgears.handlers.application.request.RequestAccounting;
|
||||
import org.gcube.smartgears.handlers.application.request.RequestValidator;
|
||||
|
||||
|
@ -75,7 +75,7 @@ public class ApplicationConfigurationBinder {
|
|||
List<ApplicationLifecycleHandler> lifecycleHandlers = new LinkedList<ApplicationLifecycleHandler>();
|
||||
|
||||
//ADDING BASE Handler (order is important)
|
||||
lifecycleHandlers.add(new ProfileManager());
|
||||
lifecycleHandlers.add(new ApplicationProfileManager());
|
||||
|
||||
|
||||
//TODO scan ApplicationLifecycleHandler form classloader
|
||||
|
|
|
@ -134,16 +134,6 @@ public class BridgedApplicationConfiguration implements ApplicationConfiguration
|
|||
return application.proxyAddress(proxyaddress);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void authorizedContexts(Set<String> authorizedContexts) {
|
||||
application.authorizedContexts(authorizedContexts);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> authorizedContexts() {
|
||||
return application.authorizedContexts();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -57,8 +57,6 @@ public class DefaultApplicationConfiguration implements ApplicationConfiguration
|
|||
@XmlElementRef
|
||||
Set<Include> includes= new LinkedHashSet<Include>();
|
||||
|
||||
Set<String> authorizedContexts;
|
||||
|
||||
@NotNull @IsValid
|
||||
private PersistenceWriter persistenceManager;
|
||||
|
||||
|
@ -169,17 +167,6 @@ public class DefaultApplicationConfiguration implements ApplicationConfiguration
|
|||
this.proxyAddress = proxyaddress;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void authorizedContexts(Set<String> authorizedContexts) {
|
||||
this.authorizedContexts = authorizedContexts;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Set<String> authorizedContexts() {
|
||||
return this.authorizedContexts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void validate() {
|
||||
|
|
|
@ -11,7 +11,7 @@ import java.util.stream.Collectors;
|
|||
import org.gcube.smartgears.configuration.ProxyAddress;
|
||||
import org.gcube.smartgears.handlers.container.ContainerHandler;
|
||||
import org.gcube.smartgears.handlers.container.lifecycle.AccountingManager;
|
||||
import org.gcube.smartgears.handlers.container.lifecycle.ProfileContainerManager;
|
||||
import org.gcube.smartgears.handlers.container.lifecycle.ContainerProfileManager;
|
||||
import org.gcube.smartgears.persistence.LocalPersistence;
|
||||
import org.gcube.smartgears.persistence.PersistenceWriter;
|
||||
import org.gcube.smartgears.security.AuthorizationProvider;
|
||||
|
@ -156,9 +156,9 @@ public class ContainerConfigurationBinder {
|
|||
|
||||
LinkedList<ContainerHandler> handlers = new LinkedList<ContainerHandler>();
|
||||
|
||||
//ADDING BASE Handler (order is important)
|
||||
//ADDING BASE Handlers (order is important)
|
||||
handlers.add(new AccountingManager());
|
||||
handlers.add(new ProfileContainerManager());
|
||||
handlers.add(new ContainerProfileManager());
|
||||
|
||||
handlers.addAll(scanForContainerHadlers(classloader));
|
||||
|
||||
|
|
|
@ -22,11 +22,6 @@ public interface ContainerContext {
|
|||
*/
|
||||
ContainerConfiguration configuration();
|
||||
|
||||
/**
|
||||
* Returns the resource profile of a given type of the container.
|
||||
* @return the profile
|
||||
*/
|
||||
HostingNode profile();
|
||||
|
||||
/**
|
||||
* Returns the lifecycle of the container
|
||||
|
|
|
@ -1,116 +0,0 @@
|
|||
package org.gcube.smartgears.handlers;
|
||||
|
||||
import static org.gcube.smartgears.utils.Utils.notEmpty;
|
||||
import static org.gcube.smartgears.utils.Utils.rethrowUnchecked;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Set;
|
||||
|
||||
import org.gcube.common.resources.gcore.Resource;
|
||||
import org.gcube.informationsystem.publisher.ScopedPublisher;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public abstract class AbstractProfilePublisher<P extends Resource> implements ProfilePublisher {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(AbstractProfilePublisher.class);
|
||||
|
||||
//the underlying IS publisher
|
||||
private final ScopedPublisher publisher;
|
||||
|
||||
//private AuthorizationProvider authProvider ;
|
||||
|
||||
public AbstractProfilePublisher() {
|
||||
this.publisher=ProviderFactory.provider().publisher();
|
||||
//this.authProvider = ProviderFactory.provider().authorizationProvider();
|
||||
}
|
||||
|
||||
protected abstract P getProfile();
|
||||
protected abstract boolean isRoot();
|
||||
protected abstract void sharePublished(P profile);
|
||||
protected abstract Set<String> getAllowedContexts();
|
||||
|
||||
/**
|
||||
* Removes the application from one or more scopes.
|
||||
* @param scopes the scopes
|
||||
*/
|
||||
public void removeFrom(Collection<String> contexts) {
|
||||
P profile = getProfile();
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.remove(profile, new ArrayList<String>(contexts));
|
||||
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
log.debug("after remove application profile contains scopes {}",profile.scopes().asCollection());
|
||||
sharePublished(profile);
|
||||
}
|
||||
|
||||
|
||||
public void addToAll(){
|
||||
this.addTo(getAllowedContexts());
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds for the first time the current resource profile of the application in one or more scopes.
|
||||
* @param contexts the contexts
|
||||
*/
|
||||
public void addTo(Collection<String> contexts) {
|
||||
notEmpty("contexts",contexts);
|
||||
|
||||
P profile = getProfile();
|
||||
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
if (isRoot()) Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.create(profile, new ArrayList<String>(contexts));
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot()) Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
|
||||
sharePublished(profile);
|
||||
log.debug("shared profile with scopes {}", profile.scopes().asCollection());
|
||||
}
|
||||
|
||||
|
||||
|
||||
public void update() {
|
||||
P profile = getProfile();
|
||||
ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
|
||||
|
||||
log.debug("using context {}",contextCL.getClass().getSimpleName());
|
||||
|
||||
try{//This classloader set is needed for the jaxb context
|
||||
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(AbstractProfilePublisher.class.getClassLoader());
|
||||
profile = publisher.update(profile);
|
||||
|
||||
} catch (Exception e) {
|
||||
rethrowUnchecked(e);
|
||||
} finally{
|
||||
if (isRoot())
|
||||
Thread.currentThread().setContextClassLoader(contextCL);
|
||||
}
|
||||
|
||||
sharePublished(profile);
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -2,36 +2,30 @@ package org.gcube.smartgears.handlers.application.lifecycle;
|
|||
|
||||
import static org.gcube.common.events.Observes.Kind.resilient;
|
||||
import static org.gcube.smartgears.Constants.profile_management;
|
||||
import static org.gcube.smartgears.Constants.profile_property;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.addToContext;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.changed;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.published;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.removeFromContext;
|
||||
import static org.gcube.smartgears.lifecycle.application.ApplicationLifecycle.activation;
|
||||
import static org.gcube.smartgears.lifecycle.application.ApplicationLifecycle.failure;
|
||||
import static org.gcube.smartgears.lifecycle.application.ApplicationLifecycle.stop;
|
||||
import static org.gcube.smartgears.lifecycle.application.ApplicationState.failed;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.gcube.common.events.Observes;
|
||||
import org.gcube.common.events.Observes.Kind;
|
||||
import org.gcube.common.resources.gcore.GCoreEndpoint;
|
||||
import org.gcube.smartgears.Constants;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.Property;
|
||||
import org.gcube.smartgears.context.application.ApplicationContext;
|
||||
import org.gcube.smartgears.handlers.OfflineProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.ProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.application.ApplicationLifecycleEvent;
|
||||
import org.gcube.smartgears.handlers.application.ApplicationLifecycleHandler;
|
||||
import org.gcube.smartgears.lifecycle.application.ApplicationLifecycle;
|
||||
import org.gcube.smartgears.lifecycle.application.ApplicationState;
|
||||
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.gcube.smartgears.publishing.Publisher;
|
||||
import org.gcube.smartgears.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -55,25 +49,23 @@ import org.slf4j.LoggerFactory;
|
|||
* @see ProfileBuilder
|
||||
* @see ProfilePublisherImpl
|
||||
*/
|
||||
@XmlRootElement(name = profile_management)
|
||||
public class ProfileManager extends ApplicationLifecycleHandler {
|
||||
public class ApplicationProfileManager extends ApplicationLifecycleHandler {
|
||||
|
||||
Logger log = LoggerFactory.getLogger(ProfileManager.class);
|
||||
Logger log = LoggerFactory.getLogger(ApplicationProfileManager.class);
|
||||
|
||||
private ApplicationContext context;
|
||||
private ProfileBuilder builder;
|
||||
private ProfilePublisher publisher;
|
||||
|
||||
private ScheduledFuture<?> periodicUpdates;
|
||||
|
||||
private List<Publisher> publishers = ProviderFactory.provider().publishers();
|
||||
|
||||
|
||||
@Override
|
||||
public void onStart(ApplicationLifecycleEvent.Start e) {
|
||||
|
||||
context = e.context();
|
||||
builder = new ProfileBuilder(context);
|
||||
|
||||
activated();
|
||||
|
||||
|
||||
schedulePeriodicUpdates();
|
||||
// note we don't fire profile events, but wait for the final startup
|
||||
// outcome which
|
||||
|
@ -89,16 +81,12 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
|
||||
|
||||
private void activated(){
|
||||
GCoreEndpoint profile = loadOrCreateProfile();
|
||||
|
||||
share(profile);
|
||||
|
||||
publisher = context.container().configuration().mode()!=Mode.offline?
|
||||
new ServicePublisher(context):
|
||||
new OfflineProfilePublisher();
|
||||
|
||||
|
||||
|
||||
publishers = context.container().configuration().mode()!=Mode.offline?
|
||||
ProviderFactory.provider().publishers():
|
||||
Collections.emptyList();
|
||||
registerObservers();
|
||||
schedulePeriodicUpdates();
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
@ -109,84 +97,76 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
@Observes({ activation, stop, failure })
|
||||
void onChanged(ApplicationLifecycle lc) {
|
||||
|
||||
profile.profile().deploymentData().status(lc.state().remoteForm());
|
||||
|
||||
log.debug("moving app {} to {}",context.name(), lc.state().remoteForm());
|
||||
|
||||
// since we do not know the observers, they will deal with
|
||||
// failures and their consequences
|
||||
// any that comes back will be logged in this event thread
|
||||
context.events().fire(profile, changed);
|
||||
context.events().fire(context, changed);
|
||||
}
|
||||
|
||||
/*
|
||||
@Observes(value = published)
|
||||
void shareAfterPublish(GCoreEndpoint profile) {
|
||||
|
||||
share(profile); // publish may produce a new profile instance
|
||||
|
||||
}
|
||||
}*/
|
||||
|
||||
@Observes(value = changed, kind = Kind.safe)
|
||||
void publishAfterChange(GCoreEndpoint profile) {
|
||||
void publishAfterChange(ApplicationContext context) {
|
||||
|
||||
boolean firstPublication = profile.scopes().isEmpty();
|
||||
|
||||
//if we've failed before first publication do not try to publish
|
||||
//(we may well have failed there)
|
||||
try {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
publisher.publishApplication(context,
|
||||
context.container().configuration().authorizationProvider().getContexts());
|
||||
}catch (Exception e) {
|
||||
|
||||
if (firstPublication) {
|
||||
if (context.lifecycle().state()!= failed)
|
||||
publishFirstTime(profile);
|
||||
log.error("cannot publish {} with publisher type {} (see details)",context.name(), publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
else{
|
||||
log.debug("update app {} profile",context.name());
|
||||
publisher.update(); // if successful, triggers share.
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
|
||||
log.error("cannot publish "+context.name()+" (see details)", e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = addToContext)
|
||||
void addTo(String token) {
|
||||
try {
|
||||
log.trace("publishing application in new context");
|
||||
publisher.addTo(Collections.singleton(token));
|
||||
publisher.update();
|
||||
}catch (Exception e) {
|
||||
void addTo(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.debug("publishing application in context {}", scope);
|
||||
publisher.publishApplication(context,
|
||||
Collections.singleton(scope));
|
||||
|
||||
log.error("cannot add token {} (see details)",token, e);
|
||||
}catch (Exception e) {
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
log.error("cannot add context {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Observes(value = removeFromContext)
|
||||
void removeFrom(String token) {
|
||||
void removeFrom(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.trace("unpublishing application from context");
|
||||
publisher.removeFrom(Collections.singleton(token));
|
||||
publisher.update();
|
||||
log.debug("unpublishing application from scope {}", scope);
|
||||
publisher.unpublishApplication(context,
|
||||
Collections.singleton(scope));
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot remove token {} (see details)",token, e);
|
||||
log.error("cannot remove scope {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
|
@ -199,52 +179,6 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
});
|
||||
}
|
||||
|
||||
|
||||
private void share(GCoreEndpoint profile) {
|
||||
|
||||
log.trace("sharing profile for {}", context.name());
|
||||
|
||||
context.properties().add(new Property(profile_property, profile));
|
||||
}
|
||||
|
||||
private void publishFirstTime(GCoreEndpoint profile) {
|
||||
|
||||
try {
|
||||
|
||||
publisher.addToAll();
|
||||
|
||||
} catch (Exception e) {
|
||||
log.warn("publishing failed",e);
|
||||
}
|
||||
}
|
||||
|
||||
private GCoreEndpoint loadOrCreateProfile() {
|
||||
|
||||
return create();
|
||||
}
|
||||
|
||||
private GCoreEndpoint create() {
|
||||
|
||||
log.info("creating profile for {}", context.name());
|
||||
|
||||
try {
|
||||
|
||||
GCoreEndpoint profile = new GCoreEndpoint();
|
||||
profile.setId(context.id());
|
||||
|
||||
builder.fill(profile);
|
||||
|
||||
return profile;
|
||||
|
||||
} catch (RuntimeException e) {
|
||||
|
||||
// this is a critical startup failure: it will fail the application
|
||||
throw new RuntimeException("cannot create profile for " + context.name(), e);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void schedulePeriodicUpdates() {
|
||||
|
||||
// register to cancel updates
|
||||
|
@ -268,12 +202,10 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
|
||||
final Runnable updateTask = new Runnable() {
|
||||
public void run() {
|
||||
GCoreEndpoint profile = context.profile();
|
||||
|
||||
//if handling of event generates failures these will be reported
|
||||
//for resilience we do not fail the application
|
||||
log.trace("firing change event on application {} profile", context.name());
|
||||
context.events().fire(profile,changed);
|
||||
log.trace("firing change event on application {} ", context.name());
|
||||
context.events().fire(context,changed);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -292,7 +224,7 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
periodicUpdates=null;
|
||||
}
|
||||
catch(Exception e) {
|
||||
log.warn("could not stop periodic updates of application {} profile", context.name(),e);
|
||||
log.warn("could not stop periodic updates of application {}", context.name(),e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -300,8 +232,8 @@ public class ProfileManager extends ApplicationLifecycleHandler {
|
|||
});
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return profile_management;
|
|
@ -0,0 +1,208 @@
|
|||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.gcube.common.events.Observes.Kind.critical;
|
||||
import static org.gcube.common.events.Observes.Kind.resilient;
|
||||
import static org.gcube.smartgears.Constants.profile_management;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.addToContext;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.changed;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.removeFromContext;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.failure;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.part_activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.shutdown;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.stop;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerState.active;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
|
||||
import org.gcube.common.events.Observes;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.container.ContainerHandler;
|
||||
import org.gcube.smartgears.handlers.container.ContainerLifecycleEvent;
|
||||
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.gcube.smartgears.publishing.Publisher;
|
||||
import org.gcube.smartgears.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
* Manages the resource profile of the application.
|
||||
* <p>
|
||||
*
|
||||
* The manager:
|
||||
*
|
||||
* <ul>
|
||||
* <li>creates the profile when the application starts for the first time;
|
||||
* <li>loads the profile when the application restarts;
|
||||
* <li>publishes the profile when the application becomes active, and at any
|
||||
* lifecycle change thereafter;
|
||||
* <li>stores the profile locally after each publication;
|
||||
* </ul>
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
* @see ProfileBuilder
|
||||
* @see ProfilePublisherImpl
|
||||
*/
|
||||
public class ContainerProfileManager extends ContainerHandler {
|
||||
|
||||
Logger log = LoggerFactory.getLogger(ContainerProfileManager.class);
|
||||
|
||||
private ContainerContext context;
|
||||
private ScheduledFuture<?> periodicUpdates;
|
||||
|
||||
private List<Publisher> publishers;
|
||||
|
||||
|
||||
@Override
|
||||
public void onStart(ContainerLifecycleEvent.Start e) {
|
||||
|
||||
context = e.context();
|
||||
|
||||
activated();
|
||||
|
||||
schedulePeriodicUpdates();
|
||||
// note we don't fire profile events, but wait for the final startup
|
||||
// outcome which
|
||||
// will result in a state change. only then we publish and store the
|
||||
// profile
|
||||
// this avoids the redundancy and performance penalty of storing and
|
||||
// publishing multiple
|
||||
// times in rapid succession (which would be correct). Revise if proves
|
||||
// problematic in corner
|
||||
// cases.
|
||||
|
||||
}
|
||||
|
||||
|
||||
private void activated(){
|
||||
publishers = context.configuration().mode()!=Mode.offline?
|
||||
ProviderFactory.provider().publishers():
|
||||
Collections.emptyList();
|
||||
registerObservers();
|
||||
schedulePeriodicUpdates();
|
||||
}
|
||||
|
||||
private void registerObservers() {
|
||||
context.events().subscribe(new Object() {
|
||||
@Observes({ activation, part_activation, shutdown, stop, failure })
|
||||
void onChanged(ContainerLifecycle lc) {
|
||||
|
||||
// since we do not know the observers, they will deal with failures and their consequences
|
||||
// any that comes back will be logged in this event thread
|
||||
context.events().fire(context, changed);
|
||||
}
|
||||
|
||||
@Observes(value = changed, kind = critical)
|
||||
void publishAfterChange(ContainerContext context) {
|
||||
log.info("Publish after profile Change event called");
|
||||
//if we've failed before first publication do not try to publish
|
||||
//(we may well have failed there)
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
publisher.publishContainer(context,
|
||||
context.configuration().authorizationProvider().getContexts());
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot publish containar with publisher type {} (see details)", publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
}
|
||||
|
||||
@Observes(value = addToContext)
|
||||
void addTo(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.trace("publishing container within new scope");
|
||||
publisher.publishContainer(context,
|
||||
Collections.singleton(scope));
|
||||
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot add container to {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = removeFromContext)
|
||||
void removeFrom(String scope) {
|
||||
for (Publisher publisher: publishers)
|
||||
try {
|
||||
log.trace("unpublishing container from context {}", scope);
|
||||
publisher.unpublishContainer(context,
|
||||
Collections.singleton(scope));
|
||||
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot add container to {} with publisher type {} (see details)",scope, publisher.getClass().getCanonicalName(), e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
private void schedulePeriodicUpdates() {
|
||||
// register to cancel updates
|
||||
context.events().subscribe(
|
||||
new Object() {
|
||||
// we register it in response to lifecycle events so that we can stop and resume along with application
|
||||
@Observes(value = { activation, part_activation }, kind = resilient)
|
||||
synchronized void restartPeriodicUpdates(ContainerLifecycle lc) {
|
||||
//already running
|
||||
if (periodicUpdates!=null)
|
||||
return;
|
||||
if (lc.state()==active)
|
||||
log.info("scheduling periodic updates of container profile");
|
||||
else
|
||||
log.info("resuming periodic updates of container profile");
|
||||
final Runnable updateTask = new Runnable() {
|
||||
public void run() {
|
||||
context.events().fire(context,changed);
|
||||
}
|
||||
};
|
||||
periodicUpdates = Utils.scheduledServicePool.scheduleAtFixedRate(updateTask, 3, context.configuration()
|
||||
.publicationFrequency(), SECONDS);
|
||||
}
|
||||
@Observes(value = { stop, failure, shutdown }, kind = resilient)
|
||||
synchronized void cancelPeriodicUpdates(ContainerLifecycle ignore) {
|
||||
if (periodicUpdates != null){
|
||||
log.trace("stopping periodic updates of container profile");
|
||||
try {
|
||||
periodicUpdates.cancel(true);
|
||||
periodicUpdates=null;
|
||||
}
|
||||
catch(Exception e) {
|
||||
log.warn("could not stop periodic updates of container profile",e);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@Override
|
||||
public String toString() {
|
||||
return profile_management;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.AbstractProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.ProfileEvents;
|
||||
|
||||
/**
|
||||
* Publishes the resource profile of the container.
|
||||
* <p>
|
||||
* Distinguishes publication in new scopes ({@link #addTo(List)} from publication updates in existing scopes ({@link #update(List)}.
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
*
|
||||
*/
|
||||
public class ContainerPublisher extends AbstractProfilePublisher<HostingNode> {
|
||||
|
||||
|
||||
private final ContainerContext context;
|
||||
|
||||
public ContainerPublisher(ContainerContext context) {
|
||||
super();
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
|
||||
protected void sharePublished(HostingNode profile) {
|
||||
context.events().fire(profile,ProfileEvents.published);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected HostingNode getProfile() {
|
||||
return context.profile();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected boolean isRoot() {
|
||||
return context.configuration().mode()!=Mode.root;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected Set<String> getAllowedContexts() {
|
||||
return context.configuration().allowedContexts();
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -1,93 +0,0 @@
|
|||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* @author Luca Frosini (ISTI-CNR)
|
||||
*/
|
||||
public class LinuxDistributionInfo {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(LinuxDistributionInfo.class);
|
||||
|
||||
public static final String LSB_RELEASE_COMMAND = "lsb_release -a";
|
||||
public static final String OS_RELEASE_FILE_PATH = "/etc/os-release";
|
||||
|
||||
protected Map<String, String> info;
|
||||
|
||||
protected Map<String, String> getInfoViaLsbReleaseCommand() throws IOException {
|
||||
logger.trace("Going to exec {}", LSB_RELEASE_COMMAND);
|
||||
Process process = Runtime.getRuntime().exec(LSB_RELEASE_COMMAND);
|
||||
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
|
||||
Map<String, String> map = parseBufferedReader(bufferedReader);
|
||||
bufferedReader.close();
|
||||
return map;
|
||||
}
|
||||
|
||||
private Map<String, String> parseBufferedReader(BufferedReader bufferedReader) throws IOException {
|
||||
Map<String, String> map = new HashMap<>();
|
||||
String line = "";
|
||||
while ((line = bufferedReader.readLine()) != null) {
|
||||
String[] nameValue = parseLine(line);
|
||||
map.put(nameValue[0], nameValue[1]);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
private String[] parseLine(String line) {
|
||||
String[] splitted = line.split("=");
|
||||
if (splitted.length < 2) {
|
||||
splitted = line.split(":");
|
||||
}
|
||||
String[] ret = new String[2];
|
||||
ret[0] = splitted[0].trim();
|
||||
ret[1] = splitted[1].trim().replace("\"", "");
|
||||
return ret;
|
||||
}
|
||||
|
||||
private Map<String, String> getInfoViaFile(File file) throws IOException {
|
||||
logger.trace("Going to read file {}", file.getAbsolutePath());
|
||||
BufferedReader bufferedReader = new BufferedReader(new FileReader(file));
|
||||
Map<String, String> map = parseBufferedReader(bufferedReader);
|
||||
bufferedReader.close();
|
||||
return map;
|
||||
|
||||
}
|
||||
|
||||
protected Map<String, String> getInfoViaOsReleaseFile() throws IOException {
|
||||
File osReleaseFile = new File(OS_RELEASE_FILE_PATH);
|
||||
return getInfoViaFile(osReleaseFile);
|
||||
}
|
||||
|
||||
private Map<String, String> retriveInfo() {
|
||||
try {
|
||||
return getInfoViaLsbReleaseCommand();
|
||||
} catch (IOException e) {
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
return getInfoViaOsReleaseFile();
|
||||
}catch (IOException e) {
|
||||
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
public Map<String, String> getInfo() {
|
||||
if (info == null) {
|
||||
info = retriveInfo();
|
||||
}
|
||||
return info;
|
||||
}
|
||||
|
||||
}
|
|
@ -1,375 +0,0 @@
|
|||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.Reader;
|
||||
import java.math.BigDecimal;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.GHNType;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.Processor;
|
||||
import org.gcube.common.resources.gcore.HostingNode.Profile.NodeDescription.Variable;
|
||||
import org.gcube.common.resources.gcore.utils.Group;
|
||||
import org.gcube.smartgears.configuration.container.ContainerConfiguration;
|
||||
import org.gcube.smartgears.configuration.library.SmartGearsConfiguration;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.provider.ProviderFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* @author Fabio Simeoni
|
||||
* @author Luca Frosini (ISTI - CNR)
|
||||
*
|
||||
*/
|
||||
public class ProfileBuilder {
|
||||
|
||||
private static Logger log = LoggerFactory.getLogger(ProfileBuilder.class);
|
||||
|
||||
private ContainerContext context;
|
||||
|
||||
public ProfileBuilder(ContainerContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
public HostingNode create() {
|
||||
|
||||
HostingNode node = new HostingNode();
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
node.newProfile().infrastructure(cfg.infrastructure());
|
||||
|
||||
addSiteTo(node);
|
||||
|
||||
String ip = "not resolved";
|
||||
try {
|
||||
ip = InetAddress.getLocalHost().getHostAddress();
|
||||
} catch (UnknownHostException e) {
|
||||
log.warn("unable to detect the IP address of the host");
|
||||
}
|
||||
|
||||
node.profile().newDescription().activationTime(Calendar.getInstance()).name(cfg.hostname() + ":" + cfg.port());
|
||||
|
||||
node.profile().description().networkAdapters().add().mtu(0).name("local-adapter").ipAddress(ip).inboundIP("")
|
||||
.outboundIP("");
|
||||
|
||||
node.profile().description().newOperatingSystem().name(System.getProperty("os.name"))
|
||||
.version(System.getProperty("os.version")).release("");
|
||||
|
||||
node.profile().description().newArchitecture().platformType(System.getProperty("os.arch")).smpSize(0)
|
||||
.smtSize(0);
|
||||
|
||||
node.profile().newSite().domain("It").country("It").location("Rome").latitude("1").longitude("1");
|
||||
|
||||
ArrayList<HashMap<String, String>> info = cpuInfo();
|
||||
|
||||
Group<Processor> processors = node.profile().description().processors();
|
||||
|
||||
for (HashMap<String, String> map : info)
|
||||
|
||||
processors.add().bogomips(new BigDecimal(map.get("bogomips")))
|
||||
.clockSpeedMhz(new BigDecimal(map.get("cpu_MHz"))).family(map.get("cpu_family"))
|
||||
.modelName(map.get("model_name")).model(map.get("model")).vendor(map.get("vendor_id"))
|
||||
.cacheL1(new Integer(map.get("cache_size"))).cacheL1D(0).cacheL1I(0).cacheL2(0);
|
||||
|
||||
addVariablesTo(node);
|
||||
|
||||
update(node,false);
|
||||
|
||||
node.profile().description().type(GHNType.Static);
|
||||
// String type = (String) context.getProperty(GHNContext.GHN_TYPE, false);
|
||||
// if (type.compareToIgnoreCase(Type.DYNAMIC.toString()) == 0) description.setType(Description.Type.Dynamic);
|
||||
// else if (type.compareToIgnoreCase(Type.STATIC.toString()) == 0) description.setType(Description.Type.Static);
|
||||
// else if (type.compareToIgnoreCase(Type.SELFCLEANING.toString()) == 0)
|
||||
// description.setType(Description.Type.Selfcleaning);
|
||||
//
|
||||
// file system
|
||||
node.profile().description().localFileSystems().add().name("").type("").readOnly(false)
|
||||
.root("/");
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
@SuppressWarnings("all")
|
||||
private ArrayList<HashMap<String, String>> cpuInfo() {
|
||||
|
||||
ArrayList<HashMap<String, String>> map = new ArrayList<HashMap<String, String>>();
|
||||
|
||||
File file = new File("/proc/cpuinfo");
|
||||
|
||||
if (!file.exists()) {
|
||||
log.warn("cannot acquire CPU info (no /proc/cpuinfo)");
|
||||
return map;
|
||||
}
|
||||
|
||||
BufferedReader input = null;
|
||||
|
||||
try {
|
||||
input = new BufferedReader(new FileReader(file));
|
||||
|
||||
String line = null;
|
||||
|
||||
HashMap<String, String> currentProcessor = null;
|
||||
|
||||
while ((line = input.readLine()) != null) {
|
||||
|
||||
if ((line.startsWith("processor"))) { // add the current processor to the map
|
||||
|
||||
if (currentProcessor != null)
|
||||
map.add((HashMap) currentProcessor.clone());
|
||||
|
||||
currentProcessor = new HashMap<String, String>();
|
||||
}
|
||||
|
||||
try {
|
||||
if (line.contains("vendor_id"))
|
||||
currentProcessor.put("vendor_id", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cpu family"))
|
||||
currentProcessor.put("cpu_family", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if ((line.contains("model\t")) || (line.contains("model\b")))
|
||||
currentProcessor.put("model", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("model name"))
|
||||
currentProcessor.put("model_name", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cpu MHz"))
|
||||
currentProcessor.put("cpu_MHz", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("cache size"))
|
||||
currentProcessor.put("cache_size", line.split(":")[1].trim().split(" ")[0]);
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
try {
|
||||
if (line.contains("bogomips"))
|
||||
currentProcessor.put("bogomips", line.split(":")[1].trim());
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
}
|
||||
|
||||
if (currentProcessor != null)
|
||||
map.add(currentProcessor);
|
||||
|
||||
} catch (Exception e) {
|
||||
|
||||
log.warn("unable to acquire CPU info", e);
|
||||
|
||||
} finally {
|
||||
|
||||
if (input != null)
|
||||
try {
|
||||
input.close();
|
||||
} catch (IOException e) {
|
||||
log.warn("unable to close stream", e);
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
private long getFreeSpace() {
|
||||
long free = 0;
|
||||
try {
|
||||
free = context.configuration().persistence().getFreeSpace()/1024;
|
||||
} catch (Exception ioe) {
|
||||
log.warn("unable to detect the free space on the disk", ioe);
|
||||
}
|
||||
return free;
|
||||
}
|
||||
|
||||
public void update(HostingNode node,boolean onLoad) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
if (onLoad) {
|
||||
|
||||
log.info("updating ghn profile");
|
||||
|
||||
node.profile().description().activationTime(Calendar.getInstance()).name(cfg.hostname() + ":" + cfg.port());
|
||||
|
||||
addVariablesTo(node);
|
||||
|
||||
addSiteTo(node);
|
||||
|
||||
}
|
||||
|
||||
node.profile().description().status(context.lifecycle().state().remoteForm());
|
||||
|
||||
Map<String, Long> mem = memoryUsage();
|
||||
|
||||
node.profile().description().newMainMemory().ramAvailable(mem.get("MemoryAvailable"))
|
||||
.ramSize(mem.get("MemoryTotalSize")).virtualAvailable(mem.get("VirtualAvailable"))
|
||||
.virtualSize(mem.get("VirtualSize"));
|
||||
|
||||
node.profile().description().localAvailableSpace(getFreeSpace());
|
||||
|
||||
node.profile().description().uptime(uptime());
|
||||
|
||||
node.profile().description().lastUpdate(Calendar.getInstance());
|
||||
|
||||
Map<String, Double> loads = loadStatistics();
|
||||
|
||||
node.profile().description().newLoad().lastMin(loads.get("1min") == null ? 0 : loads.get("1min"))
|
||||
.last5Mins(loads.get("5mins") == null ? 0 : loads.get("5mins"))
|
||||
.last15Mins(loads.get("15mins") == null ? 0 : loads.get("15mins"));
|
||||
|
||||
}
|
||||
|
||||
private void addSiteTo(HostingNode node) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
node.profile().newSite().country(cfg.site().getCountry()).location(cfg.site().getLocation())
|
||||
.latitude(cfg.site().getLatitude()).longitude(cfg.site().getLongitude()).domain(domainIn(cfg.hostname()));
|
||||
}
|
||||
|
||||
private void addVariablesTo(HostingNode node) {
|
||||
|
||||
ContainerConfiguration cfg = context.configuration();
|
||||
|
||||
Group<Variable> variables = node.profile().description().environmentVariables();
|
||||
|
||||
// Cleaning variables to avoid duplicates
|
||||
variables.removeAll(node.profile().description().environmentVariables());
|
||||
|
||||
Map<String, String> map = new HashMap<String, String>();
|
||||
map.putAll(cfg.properties());
|
||||
map.putAll(System.getenv());
|
||||
|
||||
for (Map.Entry<String, String> entry : map.entrySet()) {
|
||||
String varname = entry.getKey();
|
||||
if ((varname.compareToIgnoreCase("CLASSPATH") == 0) || (varname.compareToIgnoreCase("PATH") == 0)
|
||||
|| (varname.contains("SSH")) || (varname.contains("MAIL"))
|
||||
|| (varname.compareToIgnoreCase("LS_COLORS") == 0))
|
||||
continue;
|
||||
variables.add().keyAndValue(entry.getKey(), entry.getValue());
|
||||
}
|
||||
|
||||
/* The following code is useless can be removed
|
||||
Map<String, String> envvars = new HashMap<String, String>();
|
||||
for (String varname : envvars.keySet()) {
|
||||
|
||||
// a bit of filtering
|
||||
if ((varname.compareToIgnoreCase("CLASSPATH") == 0) || (varname.compareToIgnoreCase("PATH") == 0)
|
||||
|| (varname.contains("SSH")) || (varname.contains("MAIL"))
|
||||
|| (varname.compareToIgnoreCase("LS_COLORS") == 0))
|
||||
continue;
|
||||
|
||||
variables.add().keyAndValue(varname, envvars.get(varname));
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
String osVersion = System.getProperty("os.name");
|
||||
if(osVersion.compareToIgnoreCase("Linux")==0) {
|
||||
LinuxDistributionInfo linuxDistributionInfo = new LinuxDistributionInfo();
|
||||
Map<String,String> info = linuxDistributionInfo.getInfo();
|
||||
for(String key : info.keySet()) {
|
||||
variables.add().keyAndValue(key, info.get(key));
|
||||
}
|
||||
}
|
||||
|
||||
variables.add().keyAndValue("Java", System.getProperty("java.version"));
|
||||
|
||||
SmartGearsConfiguration config = ProviderFactory.provider().smartgearsConfiguration();
|
||||
variables.add().keyAndValue("SmartGears",config.version());
|
||||
|
||||
variables.add().keyAndValue("ghn-update-interval-in-secs", String.valueOf(cfg.publicationFrequency()));
|
||||
|
||||
}
|
||||
|
||||
public String uptime() {
|
||||
String lines = "", linetemp = null;
|
||||
try {
|
||||
Process p = Runtime.getRuntime().exec("uptime");
|
||||
p.waitFor();
|
||||
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||
while ((linetemp = input.readLine()) != null)
|
||||
lines += linetemp;
|
||||
input.close();
|
||||
p.destroy();
|
||||
lines = lines.split(",")[0].split("up")[1].trim();
|
||||
} catch (Exception e) {
|
||||
log.warn("unable to detect the uptime of this machine", e);
|
||||
lines = "unable to detect";
|
||||
}
|
||||
return lines;
|
||||
}
|
||||
|
||||
public Map<String, Double> loadStatistics() {
|
||||
|
||||
Map<String, Double> result = new HashMap<String, Double>();
|
||||
try {
|
||||
File loadadv = new File("/proc/loadavg");
|
||||
if (loadadv.exists()) {
|
||||
Reader reader = new FileReader(loadadv);
|
||||
int c;
|
||||
StringBuilder content = new StringBuilder();
|
||||
while ((c = reader.read()) != -1)
|
||||
content.append((char) c);
|
||||
reader.close();
|
||||
Pattern p = Pattern.compile("^(.*?)\\s{1}(.*?)\\s{1}(.*?)\\s{1}(.*)$");
|
||||
Matcher matcher = p.matcher(content.toString());
|
||||
if ((matcher.matches()) && (matcher.groupCount() > 3)) {
|
||||
result.put("1min", new Double(matcher.group(1)));
|
||||
result.put("5mins", new Double(matcher.group(2)));
|
||||
result.put("15mins", new Double(matcher.group(3).split("\\s")[0]));
|
||||
}
|
||||
}
|
||||
} catch (Exception ioe) {
|
||||
log.warn("unable to detect the load values of this machine", ioe);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@SuppressWarnings("all")
|
||||
public Map<String, Long> memoryUsage() {
|
||||
Map<String, Long> map = new HashMap<String, Long>();
|
||||
java.lang.management.OperatingSystemMXBean mxbean = java.lang.management.ManagementFactory
|
||||
.getOperatingSystemMXBean();
|
||||
com.sun.management.OperatingSystemMXBean sunmxbean = (com.sun.management.OperatingSystemMXBean) mxbean;
|
||||
long freeMemory = sunmxbean.getFreePhysicalMemorySize() / 1048576; // in MB
|
||||
long availableMemory = sunmxbean.getTotalPhysicalMemorySize() / 1048576; // in MB
|
||||
map.put("MemoryAvailable", freeMemory);
|
||||
map.put("MemoryTotalSize", availableMemory);
|
||||
long ramVirtualAvailable = Runtime.getRuntime().freeMemory() / 1048576; // in MB
|
||||
long ramVirtualSize = Runtime.getRuntime().totalMemory() / 1048576; // in MB
|
||||
map.put("VirtualAvailable", ramVirtualAvailable);
|
||||
map.put("VirtualSize", ramVirtualSize);
|
||||
return map;
|
||||
}
|
||||
|
||||
private String domainIn(String hostname) {
|
||||
Pattern pattern = Pattern.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})");
|
||||
java.util.regex.Matcher regexMatcher = pattern.matcher(hostname);
|
||||
if (regexMatcher.matches()) //it's an IP address, nothing to trim
|
||||
return hostname;
|
||||
String[] tokens = hostname.split("\\.");
|
||||
if (tokens.length < 2)
|
||||
return hostname;
|
||||
else
|
||||
return tokens[tokens.length-2]+ "." + tokens[tokens.length-1];
|
||||
}
|
||||
}
|
|
@ -1,291 +0,0 @@
|
|||
package org.gcube.smartgears.handlers.container.lifecycle;
|
||||
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.gcube.common.events.Observes.Kind.critical;
|
||||
import static org.gcube.common.events.Observes.Kind.resilient;
|
||||
import static org.gcube.smartgears.Constants.container_profile_property;
|
||||
import static org.gcube.smartgears.Constants.profile_management;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.addToContext;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.changed;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.published;
|
||||
import static org.gcube.smartgears.handlers.ProfileEvents.removeFromContext;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.failure;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.part_activation;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.shutdown;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerLifecycle.stop;
|
||||
import static org.gcube.smartgears.lifecycle.container.ContainerState.active;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
|
||||
import org.gcube.common.events.Observes;
|
||||
import org.gcube.common.resources.gcore.HostingNode;
|
||||
import org.gcube.smartgears.configuration.Mode;
|
||||
import org.gcube.smartgears.context.Property;
|
||||
import org.gcube.smartgears.context.container.ContainerContext;
|
||||
import org.gcube.smartgears.handlers.OfflineProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.ProfilePublisher;
|
||||
import org.gcube.smartgears.handlers.container.ContainerHandler;
|
||||
import org.gcube.smartgears.handlers.container.ContainerLifecycleEvent.Start;
|
||||
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
|
||||
import org.gcube.smartgears.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
* Manages the resource profile of the container.
|
||||
* <p>
|
||||
*
|
||||
* The manager:
|
||||
*
|
||||
* <ul>
|
||||
* <li>creates the profile when the container starts for the first time;
|
||||
* <li>loads the profile when the container restarts;
|
||||
* <li>publishes the profile when the container becomes active, and at any lifecycle change thereafter;
|
||||
* <li>stores the profile locally after each publication;
|
||||
* </ul>
|
||||
*
|
||||
* @author Fabio Simeoni
|
||||
* @see ProfileBuilder
|
||||
*/
|
||||
public class ProfileContainerManager extends ContainerHandler {
|
||||
|
||||
private static Logger log = LoggerFactory.getLogger(ProfileContainerManager.class);
|
||||
|
||||
private ContainerContext context;
|
||||
|
||||
private ProfileBuilder builder;
|
||||
private ProfilePublisher publisher;
|
||||
|
||||
|
||||
private ScheduledFuture<?> periodicUpdates;
|
||||
|
||||
@Override
|
||||
public void onStart(Start e) {
|
||||
|
||||
context = e.context();
|
||||
builder = new ProfileBuilder(context);
|
||||
|
||||
activated();
|
||||
|
||||
// note we don't fire profile events, but wait for the final startup response which
|
||||
// will result in a state change. only then we publish and store the profile
|
||||
// this avoids the redundancy and performance penalty of storing and publishing multiple
|
||||
// times in rapid succession (which would be correct). Revise if proves problematic in corner
|
||||
// cases.
|
||||
|
||||
}
|
||||
|
||||
private void activated(){
|
||||
HostingNode profile = loadOrCreateProfile();
|
||||
|
||||
share(profile);
|
||||
|
||||
publisher = context.configuration().mode()!=Mode.offline?
|
||||
new ContainerPublisher(context):
|
||||
new OfflineProfilePublisher();
|
||||
|
||||
registerObservers();
|
||||
|
||||
schedulePeriodicUpdates();
|
||||
}
|
||||
|
||||
private void registerObservers() {
|
||||
|
||||
context.events().subscribe(new Object() {
|
||||
|
||||
@Observes({ activation, part_activation, shutdown, stop, failure })
|
||||
void onChanged(ContainerLifecycle lc) {
|
||||
|
||||
HostingNode profile = context.profile();
|
||||
|
||||
profile.profile().description().status(lc.state().remoteForm());
|
||||
|
||||
// since we do not know the observers, they will deal with failures and their consequences
|
||||
// any that comes back will be logged in this event thread
|
||||
context.events().fire(profile, changed);
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Observes(value = published)
|
||||
void shareAfterPublish(HostingNode profile) {
|
||||
|
||||
share(profile); // publish may produce a new profile instance
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = changed, kind = critical)
|
||||
void publishAfterChange(HostingNode profile) {
|
||||
log.info("Publish after profile Change event called");
|
||||
publish(profile); // if successful, triggers share and store.
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = addToContext)
|
||||
void addTo(String token) {
|
||||
try {
|
||||
log.trace("publishing container with new token");
|
||||
publisher.addTo(Collections.singleton(token));
|
||||
publisher.update();
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot add token {} (see details)",token, e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Observes(value = removeFromContext)
|
||||
void removeFrom(String token) {
|
||||
try {
|
||||
log.trace("unpublishing container with new token");
|
||||
publisher.removeFrom(Collections.singleton(token));
|
||||
publisher.update();
|
||||
}catch (Exception e) {
|
||||
|
||||
log.error("cannot remove token {} (see details)",token, e);
|
||||
|
||||
// since we've failed no published event is fired and profile
|
||||
// will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the
|
||||
// changed profile.
|
||||
//TODO: CHECK --- store(profile);
|
||||
}
|
||||
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private HostingNode loadOrCreateProfile() {
|
||||
|
||||
return createProfile();
|
||||
|
||||
}
|
||||
|
||||
private void share(HostingNode profile) {
|
||||
|
||||
log.trace("sharing container profile");
|
||||
context.properties().add(new Property(container_profile_property, profile));
|
||||
}
|
||||
|
||||
private HostingNode createProfile() {
|
||||
|
||||
log.info("creating container profile");
|
||||
|
||||
try {
|
||||
HostingNode node = builder.create();
|
||||
node.setId(context.id());
|
||||
return node;
|
||||
} catch (Throwable e) {
|
||||
|
||||
// this is a critical startup failure: it will fail the application
|
||||
throw new RuntimeException("cannot create container profile", e);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void publish(HostingNode profile) {
|
||||
|
||||
//ContainerConfiguration configuration = context.configuration();
|
||||
|
||||
// first-publication vs. routine publication: when we delete scopes let's make sure there is
|
||||
// at least one left of it will be re-triggered
|
||||
boolean firstPublication = profile.scopes().isEmpty();
|
||||
|
||||
try {
|
||||
|
||||
if (firstPublication)
|
||||
publisher.addToAll();
|
||||
else
|
||||
publisher.update();
|
||||
|
||||
} catch (Exception e) {
|
||||
|
||||
log.error("cannot publish container (see details)", e);
|
||||
|
||||
// since we've failed no published event is fired and profile will not be stored.
|
||||
// we do it manually to ensure we leave some local trace of the changed profile.
|
||||
//store(profile);
|
||||
|
||||
}
|
||||
}
private void schedulePeriodicUpdates() {

// register to cancel updates
context.events().subscribe(

new Object() {

// we register it in response to lifecycle events so that we can stop and resume along with application
@Observes(value = { activation, part_activation }, kind = resilient)
synchronized void restartPeriodicUpdates(ContainerLifecycle lc) {

//already running
if (periodicUpdates!=null)
return;

if (lc.state()==active)
log.info("scheduling periodic updates of container profile");
else
log.info("resuming periodic updates of container profile");

final Runnable updateTask = new Runnable() {
public void run() {
HostingNode profile = context.profile();
try {
builder.update(profile, false);
}
catch(Exception e) {
//we may fail in the update of the profile
log.error("cannot complete periodic update of container profile",e);
}

//if handling of event generates failures these will be reported
//for resilience we do not fail the application
log.trace("firing change event on container profile");
context.events().fire(profile,changed);
}
};

periodicUpdates = Utils.scheduledServicePool.scheduleAtFixedRate(updateTask, 3, context.configuration()
.publicationFrequency(), SECONDS);
}

@Observes(value = { stop, failure, shutdown }, kind = resilient)
synchronized void cancelPeriodicUpdates(ContainerLifecycle ignore) {

if (periodicUpdates != null){
log.trace("stopping periodic updates of container profile");
try {
periodicUpdates.cancel(true);
periodicUpdates=null;
}
catch(Exception e) {
log.warn("could not stop periodic updates of container profile",e);
}
}
}

});
}

@Override
public String toString() {
return profile_management;
}
}
@ -23,7 +23,6 @@ import javax.servlet.ServletContextListener;
import javax.servlet.ServletRegistration;

import org.gcube.common.events.Observes;
import org.gcube.smartgears.configuration.Mode;
import org.gcube.smartgears.configuration.application.ApplicationExtensions;
import org.gcube.smartgears.configuration.application.ApplicationHandlers;
import org.gcube.smartgears.context.application.ApplicationContext;

@ -69,11 +68,8 @@ public class ApplicationManager {
for (Entry<String,? extends ServletRegistration> servlet : application.getServletRegistrations().entrySet())
log.trace("servlet {} : {} {} ", application.getServletContextName(),servlet.getKey(), servlet.getValue().getMappings());

if (context.container().configuration().mode()!=Mode.offline) {
context.configuration().authorizedContexts(context.container().configuration().allowedContexts());
context.configuration().validate();
//TODO take information from container to configure application
}
context.configuration().validate();

saveApplicationState();

// make context available to application in case it is gcube-aware

@ -279,6 +275,7 @@ public class ApplicationManager {
@Override
public void contextInitialized(ServletContextEvent sce) {
log.info("initializing context {} ",context.name());
context.events().fire(context.application().getContextPath(), ApplicationLifecycle.activation);
log.info("webApp {} initialized ",context.name());
}
@ -14,16 +14,17 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.UUID;

import javax.servlet.ServletContext;

import org.gcube.common.events.Hub;
import org.gcube.common.events.impl.DefaultHub;
import org.gcube.informationsystem.publisher.RegistryPublisherFactory;
import org.gcube.informationsystem.publisher.ScopedPublisher;
import org.gcube.smartgears.configuration.Mode;
import org.gcube.smartgears.configuration.application.ApplicationConfiguration;
import org.gcube.smartgears.configuration.application.ApplicationConfigurationBinder;
import org.gcube.smartgears.configuration.application.ApplicationExtensions;

@ -41,8 +42,15 @@ import org.gcube.smartgears.context.container.DefaultContainerContext;
import org.gcube.smartgears.handlers.container.ContainerHandler;
import org.gcube.smartgears.lifecycle.application.ApplicationLifecycle;
import org.gcube.smartgears.lifecycle.container.ContainerLifecycle;
import org.gcube.smartgears.publishing.Publisher;
import org.gcube.smartgears.publishing.SmartgearsProfilePublisher;
import org.gcube.smartgears.security.AuthorizationProvider;
import org.gcube.smartgears.utils.Utils;
import org.reflections.Reflections;
import org.reflections.scanners.SubTypesScanner;
import org.reflections.scanners.TypeAnnotationsScanner;
import org.reflections.util.ClasspathHelper;
import org.reflections.util.ConfigurationBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -66,6 +74,8 @@ public class DefaultProvider implements Provider {
this.configFile = configFile;
}

List<Publisher> publishers;

protected DefaultProvider(){};

@ -194,7 +204,7 @@ public class DefaultProvider implements Provider {
ApplicationConfigurationBinder binder = new ApplicationConfigurationBinder();

//searching for smartgears-related application handlers in the common classloader
ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();

@ -204,7 +214,7 @@ public class DefaultProvider implements Provider {
}

ApplicationHandlers defaultHandlers = binder.bindHandlers(currentClassLoader);

return defaultHandlers;
@ -329,9 +339,38 @@ public class DefaultProvider implements Provider {
}

@Override
public ScopedPublisher publisher() {
return containerContext.configuration().mode()==Mode.online? RegistryPublisherFactory.scopedPublisher()
: new OfflinePublisher();
public synchronized List<Publisher> publishers() {
if (this.publishers == null) {
//retrieve from root class loader
Collection<URL> urls = ClasspathHelper.forClassLoader(Thread.currentThread().getContextClassLoader());
urls.removeIf(url -> url.toString().endsWith(".so") || url.toString().endsWith(".zip") );

ConfigurationBuilder reflectionConf = new ConfigurationBuilder().addUrls(urls).setScanners(new TypeAnnotationsScanner(), new SubTypesScanner());

Reflections reflection = new Reflections(reflectionConf);

Set<Class<?>> annotatedPublishers = reflection.getTypesAnnotatedWith(SmartgearsProfilePublisher.class);
List<Publisher> foundPublishers = new ArrayList<Publisher>();
for (Class<?> annotatedPublisher: annotatedPublishers) {
if (Publisher.class.isAssignableFrom(annotatedPublisher))
try {
foundPublishers.add((Publisher)annotatedPublisher.newInstance());
log.info("added class {} to publishers",annotatedPublisher);
} catch (Exception e) {
log.error("publisher class {} cannot be instantiated", annotatedPublisher.getCanonicalName(),e);
}
else
log.warn("publisher class {} discarded, it doesn't implement the Publisher interface", annotatedPublisher.getCanonicalName());
}
this.publishers = foundPublishers;

if (foundPublishers.isEmpty())
log.warn("no publishers found in classloader");
}

return this.publishers;
}
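For orientation only, a minimal sketch of how a caller might drive the publishers discovered above. The publishContainer signature is the one introduced on the reworked Publisher interface later in this commit; the variables provider, containerContext, authorizedContexts and log are assumptions, not names taken from the source.

// illustrative sketch, not part of the commit: ask each discovered publisher
// to publish the container profile in the authorized contexts
for (Publisher publisher : provider.publishers()) {
    boolean published = publisher.publishContainer(containerContext, authorizedContexts);
    if (!published)
        log.warn("publisher {} did not publish the container profile", publisher.getClass().getCanonicalName());
}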
@ -1,56 +0,0 @@
package org.gcube.smartgears.provider;

import java.lang.reflect.Method;
import java.util.List;

import org.gcube.common.resources.gcore.Resource;
import org.gcube.informationsystem.publisher.ScopedPublisher;
import org.gcube.informationsystem.publisher.exception.RegistryNotFoundException;
import org.gcube.smartgears.configuration.Mode;

/**
* An implementation of {@link ScopedPublisher} that simulates remote publication.
* <p>
* Used for applications and or containers that operate in {@link Mode#offline}.
*
* @author Fabio Simeoni
*
*/
public class OfflinePublisher implements ScopedPublisher {

@Override
public <T extends Resource> T update(T resource){
// do nothing
return resource;
}

@Override
public <T extends Resource> T create(T resource, List<String> scopes)
throws RegistryNotFoundException {
// fragile! bypass restrictions reflectively and set new scope
for (String scope : scopes)
try {
Method m = resource.getClass().getSuperclass().getDeclaredMethod("addScope", String.class);
m.setAccessible(true);
m.invoke(resource, scope);
} catch (Exception e) {
throw new RuntimeException("could not simulate publication in scope " + scope, e);
}
return resource;
}

@Override
public <T extends Resource> T remove(T resource, List<String> scopes)
throws RegistryNotFoundException {
for (String scope : scopes)
try {
Method m = resource.getClass().getSuperclass().getDeclaredMethod("removeScope", String.class);
m.setAccessible(true);
m.invoke(resource, scope);
} catch (Exception e) {
throw new RuntimeException("could not simulate publication remove from scope " + scope, e);
}
return resource;
}

}
@ -4,13 +4,13 @@ import java.util.List;

import javax.servlet.ServletContext;

import org.gcube.informationsystem.publisher.ScopedPublisher;
import org.gcube.smartgears.configuration.application.ApplicationExtensions;
import org.gcube.smartgears.configuration.application.ApplicationHandlers;
import org.gcube.smartgears.configuration.library.SmartGearsConfiguration;
import org.gcube.smartgears.context.application.ApplicationContext;
import org.gcube.smartgears.context.container.ContainerContext;
import org.gcube.smartgears.handlers.container.ContainerHandler;
import org.gcube.smartgears.publishing.Publisher;
import org.gcube.smartgears.security.AuthorizationProvider;

/**

@ -46,7 +46,7 @@ public interface Provider {
* Returns an implementation of the IS publisher for the container
* @return the publisher implementation
*/
ScopedPublisher publisher();
List<Publisher> publishers();

//application-level dependencies
@ -1,15 +1,17 @@
package org.gcube.smartgears.publishing;

import org.gcube.smartgears.configuration.application.ApplicationConfiguration;
import org.gcube.smartgears.configuration.container.ContainerConfiguration;
import java.util.Set;

import org.gcube.smartgears.context.application.ApplicationContext;
import org.gcube.smartgears.context.container.ContainerContext;

public interface Publisher {

boolean publishContainer(ContainerConfiguration container, String ... contexts);
boolean publishContainer(ContainerContext container, Set<String> contexts);

boolean publishApplication(ApplicationConfiguration application, String ... contexts);
boolean publishApplication(ApplicationContext application, Set<String> contexts);

boolean unpublishContainer(ContainerConfiguration container, String ... contexts);
boolean unpublishContainer(ContainerContext container, Set<String> contexts);

boolean unpublishApplication(ApplicationConfiguration application, String ... contexts);
boolean unpublishApplication(ApplicationContext application, Set<String> contexts);
}
@ -0,0 +1,5 @@
package org.gcube.smartgears.publishing;

public @interface SmartgearsProfilePublisher {

}
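Taken together with the reworked Publisher interface above, this marker annotation is what the classpath scan in DefaultProvider.publishers() looks for. A minimal sketch of a publisher contributed by an external library might look as follows; the class name ExamplePublisher and its package are hypothetical, and the trivial method bodies stand in for real IS publication logic:

// hypothetical library class, not part of this commit
package org.acme.publishing;

import java.util.Set;

import org.gcube.smartgears.context.application.ApplicationContext;
import org.gcube.smartgears.context.container.ContainerContext;
import org.gcube.smartgears.publishing.Publisher;
import org.gcube.smartgears.publishing.SmartgearsProfilePublisher;

// annotated and with a public no-arg constructor, so the classpath scan can find and instantiate it
@SmartgearsProfilePublisher
public class ExamplePublisher implements Publisher {

    @Override
    public boolean publishContainer(ContainerContext container, Set<String> contexts) {
        // publish the container profile in the given contexts; return true on success
        return true;
    }

    @Override
    public boolean publishApplication(ApplicationContext application, Set<String> contexts) {
        return true;
    }

    @Override
    public boolean unpublishContainer(ContainerContext container, Set<String> contexts) {
        return true;
    }

    @Override
    public boolean unpublishApplication(ApplicationContext application, Set<String> contexts) {
        return true;
    }
}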
@ -11,7 +11,7 @@ import org.gcube.common.keycloak.KeycloakClientFactory;
import org.gcube.common.keycloak.model.AccessToken.Access;
import org.gcube.common.keycloak.model.ModelUtils;
import org.gcube.common.keycloak.model.TokenResponse;
import org.gcube.common.scope.impl.ScopeBean;
import org.gcube.common.security.ContextBean;
import org.gcube.common.security.secrets.AccessTokenSecret;
import org.gcube.common.security.secrets.Secret;
import org.gcube.smartgears.security.AuthorizationProvider;

@ -38,15 +38,11 @@ public class DefaultAuthorizationProvider implements AuthorizationProvider {
public Set<String> getContexts() {
Set<String> contexts = new HashSet<String>();
try {
TokenResponse response;
if (this.endpoint == null)
response = client.queryOIDCToken(credentials.getClientID(), credentials.getSecret());
else
response = client.queryOIDCToken(new URL(this.endpoint), credentials.getClientID(), credentials.getSecret());
TokenResponse response = client.queryOIDCToken(new URL(this.endpoint), credentials.getClientID(), credentials.getSecret());
Map<String, Access> resourceAccess = ModelUtils.getAccessTokenFrom(response).getResourceAccess();
for (String context : resourceAccess.keySet()) {
try {
ScopeBean scope = new ScopeBean(context.replaceAll("%2F", "/"));
ContextBean scope = new ContextBean(context.replaceAll("%2F", "/"));
contexts.add(scope.toString());
LOG.info("found context {}",context);
}catch (IllegalArgumentException e) {

@ -64,12 +60,7 @@ public class DefaultAuthorizationProvider implements AuthorizationProvider {
@Override
public Secret getSecretForContext(String context) {
try {
TokenResponse response;
if (this.endpoint == null)
response = client.queryUMAToken(credentials.getClientID(), credentials.getSecret(), context, null);
else
response = client.queryUMAToken(new URL(this.endpoint), credentials.getClientID(), credentials.getSecret(), context, null);

TokenResponse response = client.queryUMAToken(new URL(this.endpoint), credentials.getClientID(), credentials.getSecret(), context, null);
return new AccessTokenSecret(response.getAccessToken());

} catch (Exception e) {
@ -4,7 +4,6 @@ import java.io.File;

import javax.servlet.ServletContext;

import org.gcube.informationsystem.publisher.ScopedPublisher;
import org.gcube.smartgears.configuration.application.ApplicationConfiguration;
import org.gcube.smartgears.configuration.application.ApplicationExtensions;
import org.gcube.smartgears.configuration.application.ApplicationHandlers;

@ -23,11 +22,7 @@ public class TestProvider extends DefaultProvider {
public ApplicationConfiguration configuration;
public ApplicationHandlers handlers;
public ApplicationExtensions extensions;
public ScopedPublisher publisher;

public void use(ScopedPublisher publisher) {
this.publisher=publisher;
}

public void use(ApplicationConfiguration configuration) {
this.configuration=configuration;

@ -50,11 +45,7 @@ public class TestProvider extends DefaultProvider {
return conf ;
}

@Override
public ScopedPublisher publisher() {
return publisher==null?super.publisher():publisher;
}

@Override
public ApplicationContext contextFor(ContainerContext container,ServletContext application) {
return context = super.contextFor(container,application);

@ -30,7 +30,7 @@ location = rome
; mandatory
; optional fields: provider factory (=org.gcube.smartgears.security.defaults.DefaultAuthorizationProviderFactory)
factory = org.gcube.smartgears.security.defaults.DefaultAuthorizationProviderFactory
endpoint = testEndpoint
endpoint = https://accounts.dev.d4science.org/auth/realms/d4science
credentials.class = org.gcube.smartgears.security.SimpleCredentials
credentials.clientID = testClient
credentials.secret = testSecret