Feature #17265, Provide OAuth2 service with capability to be deployed on
a multi-instance cluster
This commit is contained in:
Massimiliano Assante 2019-08-21 09:46:51 +02:00
parent 316bd0a7fa
commit f1d8489760
3 changed files with 110 additions and 63 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/target/

View File

@ -0,0 +1,109 @@
package org.gcube.portal.oauth;
import static org.gcube.resources.discovery.icclient.ICFactory.clientFor;
import static org.gcube.resources.discovery.icclient.ICFactory.queryFor;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.gcube.common.resources.gcore.ServiceEndpoint;
import org.gcube.common.resources.gcore.ServiceEndpoint.AccessPoint;
import org.gcube.common.resources.gcore.utils.Group;
import org.gcube.common.scope.api.ScopeProvider;
import org.gcube.resources.discovery.client.api.DiscoveryClient;
import org.gcube.resources.discovery.client.queries.api.SimpleQuery;
import org.gcube.smartgears.ContextProvider;
import org.gcube.smartgears.context.application.ApplicationContext;
import org.slf4j.LoggerFactory;
import net.spy.memcached.MemcachedClient;
/**
 * Client wrapper around a Memcached cluster whose hosts are discovered through
 * the gCube Information System (IS). Lets the OAuth2 cache be shared across
 * multiple service instances in a cluster.
 *
 * @author Massimiliano Assante at ISTI-CNR
 */
public class DistributedCacheClient {

	// Logger
	private static final org.slf4j.Logger logger = LoggerFactory.getLogger(DistributedCacheClient.class);

	/** Name of the ServiceEndpoint resource looked up on the IS. */
	private static final String MEMCACHED_RESOURCE_NAME = "Memcached";
	/** Category of the ServiceEndpoint resource looked up on the IS. */
	private static final String CATEGORY = "Database";

	/** Shared memcached client; stays null if construction failed (see constructor). */
	private MemcachedClient mClient;

	/**
	 * Singleton object, eagerly initialized at class-load time.
	 */
	private static DistributedCacheClient singleton = new DistributedCacheClient();

	/**
	 * Build the singleton instance by discovering the Memcached hosts on the IS
	 * and connecting a {@link MemcachedClient} to them.
	 */
	private DistributedCacheClient() {
		List<InetSocketAddress> addrs = discoverHostOfServiceEndpoint();
		try {
			mClient = new MemcachedClient(addrs);
		} catch (IOException e) {
			// log through SLF4J with the full stack trace instead of printStackTrace()
			logger.error("Unable to create the MemcachedClient for addresses " + addrs, e);
		}
	}

	/**
	 * Retrieve the singleton instance.
	 *
	 * @return the shared {@link DistributedCacheClient}
	 */
	public static DistributedCacheClient getInstance() {
		// defensive re-creation; the field is normally already initialized above
		if (singleton == null) {
			singleton = new DistributedCacheClient();
		}
		return singleton;
	}

	/**
	 * @return the underlying {@link MemcachedClient}, or null if it could not be created
	 */
	public MemcachedClient getMemcachedClient() {
		return mClient;
	}

	/**
	 * Retrieve the Memcached endpoint resource from the IS. The query runs on the
	 * infrastructure root scope; the caller's scope is always restored afterwards.
	 *
	 * @return the host addresses of the Memcached cluster; empty if the lookup
	 *         failed or no matching resource exists
	 */
	private static List<InetSocketAddress> discoverHostOfServiceEndpoint() {
		String currentScope = ScopeProvider.instance.get();
		ApplicationContext ctx = ContextProvider.get(); // get this info from SmartGears
		String infrastructure = "/" + ctx.container().configuration().infrastructure();
		ScopeProvider.instance.set(infrastructure);
		List<InetSocketAddress> toReturn = new ArrayList<InetSocketAddress>();
		try {
			SimpleQuery query = queryFor(ServiceEndpoint.class);
			query.addCondition("$resource/Profile/Name/text() eq '" + MEMCACHED_RESOURCE_NAME + "'");
			query.addCondition("$resource/Profile/Category/text() eq '" + CATEGORY + "'");
			DiscoveryClient<ServiceEndpoint> client = clientFor(ServiceEndpoint.class);
			List<ServiceEndpoint> ses = client.submit(query);
			if (ses.isEmpty()) {
				logger.error("There is no Memcached cluster having name: " + MEMCACHED_RESOURCE_NAME + " and Category " + CATEGORY + " on root context in this infrastructure: ");
				// return the empty list rather than null so the caller needs no null check
				return toReturn;
			}
			// only the first matching ServiceEndpoint is used (note the break below)
			for (ServiceEndpoint se : ses) {
				Group<AccessPoint> aps = se.profile().accessPoints();
				for (AccessPoint ap : aps.asCollection()) {
					String address = ap.address(); // e.g. socialnetworking-d-d4s.d4science.org:11211
					String[] splits = address.split(":");
					String hostname = splits[0];
					int port = Integer.parseInt(splits[1]);
					toReturn.add(new InetSocketAddress(hostname, port));
				}
				break;
			}
		} catch (Exception e) {
			// pass the exception so the stack trace is logged, not just the message
			logger.error("Error while retrieving hosts for the Memcached cluster having name: " + MEMCACHED_RESOURCE_NAME + " and Category " + CATEGORY + " on root context", e);
		} finally {
			// always restore the scope the caller was working in (was duplicated
			// after the finally block in the original; once is enough)
			ScopeProvider.instance.set(currentScope);
		}
		return toReturn;
	}
}

View File

@ -1,63 +0,0 @@
package org.gcube.portal.oauth.cache;
import java.util.Date;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import org.slf4j.LoggerFactory;
/**
 * This thread cleans a cache by removing expired entries.
 *
 * @author Costantino Perciante at ISTI-CNR (costantino.perciante@isti.cnr.it)
 */
public class CacheCleaner extends Thread {

	// Cache to be periodically purged of expired entries.
	// NOTE(review): if other threads mutate this map concurrently, a plain HashMap
	// iteration here can throw ConcurrentModificationException — confirm the map
	// handed in by the caller is concurrency-safe.
	private Map<String, CacheBean> cacheReference;

	/** Time between two cleanup passes: 10 minutes. */
	private static final int CHECK_AFTER_MS = 1000 * 60 * 10;

	private static final org.slf4j.Logger logger = LoggerFactory.getLogger(CacheCleaner.class);

	/**
	 * Build a cleaner thread.
	 *
	 * @param cache the cache whose expired entries will be removed
	 */
	public CacheCleaner(Map<String, CacheBean> cache) {
		this.cacheReference = cache;
	}

	@Override
	public void run() {
		while (!isInterrupted()) {
			try {
				sleep(CHECK_AFTER_MS);
				logger.info("Going to clean up cache and old codes [" + new Date() + "]");
				int removedEntries = 0;
				// Iterator.remove() is the only safe way to drop entries while iterating
				Iterator<Entry<String, CacheBean>> iterator = cacheReference.entrySet().iterator();
				while (iterator.hasNext()) {
					Entry<String, CacheBean> entry = iterator.next();
					if (CacheBean.isExpired(entry.getValue())) {
						logger.debug("Removing entry " + entry.getValue());
						removedEntries++;
						iterator.remove();
					}
				}
				logger.info("Going to sleep. Number of removed entries is " + removedEntries + " [" + new Date() + "]");
			} catch (InterruptedException e) {
				logger.warn("Exception was " + e.getMessage());
				// sleep() clears the interrupted status when it throws, so the
				// original `continue` made the thread unstoppable: restore the flag
				// so the while-condition sees the interruption and the loop exits.
				Thread.currentThread().interrupt();
			}
		}
	}
}