Added Service Discovery
parent 79f326d906
commit 68a7532379
@@ -41,7 +41,7 @@ public class CassandraClusterConnection {
     * @param dropSchema set true if you want do drop the current and set up new one
     * the connection to cassandra cluster
     */
-   protected CassandraClusterConnection(boolean dropSchema) {
+   protected CassandraClusterConnection(boolean dropSchema) throws Exception {
        if (hosts == null || datacenterName == null || keyspaceName == null) {
            RunningCluster cluster = RunningCluster.getInstance(null);
 
@@ -62,7 +62,7 @@ public class CassandraClusterConnection {
     * @param dropSchema set true if you want to drop the current and set up new one
     * the connection to cassandra cluster
     */
-   protected CassandraClusterConnection(boolean dropSchema, String infrastructureName) {
+   protected CassandraClusterConnection(boolean dropSchema, String infrastructureName) throws Exception {
        if (hosts == null || datacenterName == null || keyspaceName == null) {
            RunningCluster cluster = RunningCluster.getInstance(infrastructureName);
            //host = cluster.getHost();
@@ -45,20 +45,20 @@ public final class DBCassandraAstyanaxImpl implements DatabookStore {
     * use this constructor carefully from test classes
     * @param dropSchema set true if you want do drop the current and set up new one
     */
-   protected DBCassandraAstyanaxImpl(boolean dropSchema) {
+   protected DBCassandraAstyanaxImpl(boolean dropSchema) throws Exception {
        conn = new CassandraClusterConnection(dropSchema);
    }
    /**
     * public constructor, no dropping schema is allowed
     */
-   public DBCassandraAstyanaxImpl() {
+   public DBCassandraAstyanaxImpl() throws Exception {
        conn = new CassandraClusterConnection(false);
    }
 
    /**
     * public constructor, no dropping schema is allowed, infrastructureName is given.
     */
-   public DBCassandraAstyanaxImpl(String infrastructureName) {
+   public DBCassandraAstyanaxImpl(String infrastructureName) throws Exception {
        conn = new CassandraClusterConnection(false, infrastructureName);
    }
 
@@ -49,7 +49,7 @@ public class RunningCluster implements Serializable {
    /**
     * other constants
     */
-   private final static String RUNTIME_RESOURCE_NAME = "SocialPortalDataStore";
+   private final static String RUNTIME_RESOURCE_NAME = "SocialDB";
    private final static String PLATFORM_NAME = "Cassandra";
 
    private static final String DEFAULT_CONFIGURATION = "/org/gcube/portal/databook/server/resources/databook.properties";
@@ -72,7 +72,7 @@ public class RunningCluster implements Serializable {
     * @param infrastructureName could be null
     * @return an instance of the RunningCluster
     */
-   public static synchronized RunningCluster getInstance(String infrastructureName) {
+   public static synchronized RunningCluster getInstance(String infrastructureName) throws Exception {
        if (singleton == null) {
            singleton = new RunningCluster(infrastructureName);
        }
@@ -81,9 +81,10 @@ public class RunningCluster implements Serializable {
    /**
     * private constructor
     */
-   private RunningCluster(String infrastructureName) {
+   private RunningCluster(String infrastructureName) throws Exception {
        //Query the IS (for the future)
-       /*List<ServiceEndpoint> resources = getConfigurationFromIS(infrastructureName);
+       try{
+           List<ServiceEndpoint> resources = getConfigurationFromIS(infrastructureName);
            if (resources.size() > 1) {
                _log.error("Too many Runtime Resource having name " + RUNTIME_RESOURCE_NAME +" in this scope ");
                throw new TooManyRunningClustersException("There exist more than 1 Runtime Resource in this scope having name "
@@ -97,17 +98,17 @@ public class RunningCluster implements Serializable {
                for (ServiceEndpoint res : resources) {
                    AccessPoint found = res.profile().accessPoints().iterator().next();
                    host = found.address();
-                   clusterName = found.description();
+                   datacenterName = found.description();
                    keyspaceName = found.name();
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
-       }*/
+       }
 
-       host = "10.1.28.55:9042, 10.1.30.142:9042, 10.1.28.100:9042";
+       /*host = "10.1.28.55:9042, 10.1.30.142:9042, 10.1.28.100:9042";
        datacenterName = "1";
-       keyspaceName = "dev_mig_consistent";
+       keyspaceName = "dev_mig_consistent";*/
    }
 
    /**
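Note: getConfigurationFromIS(infrastructureName) is called in the hunks above but its body is not part of this diff. The following is a hypothetical sketch of how such a lookup is commonly written against the gCube Information System with the IC client (queryFor/clientFor/DiscoveryClient); the class name ServiceDiscoverySketch, the query conditions and the scope handling are illustrative assumptions, not code from this commit. The constants mirror RUNTIME_RESOURCE_NAME = "SocialDB" and PLATFORM_NAME = "Cassandra" introduced above.

// Hypothetical sketch (not part of this commit): resolving the Cassandra
// Runtime Resource via the gCube IS, assuming the standard IC client API.
import static org.gcube.resources.discovery.icclient.ICFactory.clientFor;
import static org.gcube.resources.discovery.icclient.ICFactory.queryFor;

import java.util.List;

import org.gcube.common.resources.gcore.ServiceEndpoint;
import org.gcube.common.scope.api.ScopeProvider;
import org.gcube.resources.discovery.client.api.DiscoveryClient;
import org.gcube.resources.discovery.client.queries.api.SimpleQuery;

public class ServiceDiscoverySketch {

    // same values as the constants introduced in RunningCluster above
    private static final String RUNTIME_RESOURCE_NAME = "SocialDB";
    private static final String PLATFORM_NAME = "Cassandra";

    public List<ServiceEndpoint> getConfigurationFromIS(String infrastructureName) throws Exception {
        // infrastructureName may be null (see the getInstance javadoc above);
        // defaulting to "gcube" here is an assumption taken from Tester below.
        String scope = "/" + (infrastructureName != null ? infrastructureName : "gcube");
        String previousScope = ScopeProvider.instance.get();
        ScopeProvider.instance.set(scope);
        try {
            // ask the IS for ServiceEndpoint resources matching name and platform
            SimpleQuery query = queryFor(ServiceEndpoint.class);
            query.addCondition("$resource/Profile/Name/text() eq '" + RUNTIME_RESOURCE_NAME + "'");
            query.addCondition("$resource/Profile/Platform/Name/text() eq '" + PLATFORM_NAME + "'");
            DiscoveryClient<ServiceEndpoint> client = clientFor(ServiceEndpoint.class);
            return client.submit(query);
        } finally {
            // simplified restore of whatever scope was set before the lookup
            ScopeProvider.instance.set(previousScope);
        }
    }
}

The hunk above already shows how the result is consumed: the first AccessPoint of the ServiceEndpoint provides address(), description() and name(), which are mapped to host, datacenterName and keyspaceName.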
@@ -2,6 +2,7 @@ package org.gcube.portal.databook.server
 
+import org.gcube.portal.databook.shared.*;
 import org.gcube.portal.databook.shared.ex.*;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -18,17 +19,18 @@ public class Tester {
    private static DBCassandraAstyanaxImpl store;
    private static Logger LOGGER = LoggerFactory.getLogger(Tester.class);
 
-   public Tester() {
+   public Tester() throws Exception {
        store = new DBCassandraAstyanaxImpl("gcube"); //set to true if you want to drop the KeySpace and recreate it
    }
 
-   public static void main(String[] args) throws ColumnNameNotFoundException, PrivacyLevelTypeNotFoundException, FeedIDNotFoundException, FeedTypeNotFoundException {
+   public static void main(String[] args) throws Exception {
        Tester test = new Tester();
        //test.getComment();
        test.testFunc();
        System.exit(0);
 
    }
    @Test
    public void testFunc() throws ColumnNameNotFoundException, PrivacyLevelTypeNotFoundException, FeedIDNotFoundException, FeedTypeNotFoundException {
        String postIdToUpdate = "047c601d-2291-4974-9224-d6732b1fbe26";
+       Post read = store.readPost(postIdToUpdate);
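Note: since DBCassandraAstyanaxImpl, CassandraClusterConnection and RunningCluster now all declare throws Exception, callers have to propagate or handle the checked exception, exactly as Tester does above. A minimal hypothetical caller is sketched below; the class name UsageSketch and its placement in org.gcube.portal.databook.server are assumptions, while the infrastructure name "gcube" and the post id are the values used in Tester.

// Hypothetical caller sketch (not part of this commit).
package org.gcube.portal.databook.server;

import org.gcube.portal.databook.shared.Post;

public class UsageSketch {

    public static void main(String[] args) {
        try {
            // the infrastructure name is handed down to RunningCluster.getInstance(...)
            DBCassandraAstyanaxImpl store = new DBCassandraAstyanaxImpl("gcube");
            // readPost(...) may throw the databook checked exceptions as well,
            // all covered by the surrounding catch
            Post read = store.readPost("047c601d-2291-4974-9224-d6732b1fbe26");
            System.out.println(read);
        } catch (Exception e) {
            // discovery of the Runtime Resource or the cluster connection failed
            e.printStackTrace();
        }
    }
}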