Merge branch 'master' into feature-odf
diff --git a/odf/0001-ATLAS-1898-initial-commit-of-ODF.patch b/odf/0001-ATLAS-1898-initial-commit-of-ODF.patch
new file mode 100644
index 0000000..ec1c344
--- /dev/null
+++ b/odf/0001-ATLAS-1898-initial-commit-of-ODF.patch
@@ -0,0 +1,45230 @@
+From ae030999189fa4dcf14e9d07e422167bb340beb2 Mon Sep 17 00:00:00 2001
+From: Mandy Chessell <mandy_chessell@uk.ibm.com>
+Date: Mon, 26 Jun 2017 18:07:03 +0100
+Subject: [PATCH] ATLAS-1898: initial commit of ODF
+
+---
+ odf/README.md                                      |    6 +
+ odf/jettyconfig/jetty-https.xml                    |   63 ++
+ odf/jettyconfig/jetty-ssl.xml                      |   45 +
+ odf/jettyconfig/jetty.xml                          |   28 +
+ odf/jettyconfig/realm.properties                   |   24 +
+ odf/odf-api/.gitignore                             |    5 +
+ odf/odf-api/pom.xml                                |  100 ++
+ .../java/org/apache/atlas/odf/api/ODFFactory.java  |   41 +
+ .../atlas/odf/api/OpenDiscoveryFramework.java      |   79 ++
+ .../odf/api/analysis/AnalysisCancelResult.java     |   34 +
+ .../atlas/odf/api/analysis/AnalysisManager.java    |   62 ++
+ .../atlas/odf/api/analysis/AnalysisRequest.java    |  108 ++
+ .../odf/api/analysis/AnalysisRequestStatus.java    |  124 +++
+ .../odf/api/analysis/AnalysisRequestSummary.java   |   52 +
+ .../api/analysis/AnalysisRequestTrackerStatus.java |   25 +
+ .../odf/api/analysis/AnalysisRequestTrackers.java  |   36 +
+ .../atlas/odf/api/analysis/AnalysisResponse.java   |   66 ++
+ .../atlas/odf/api/annotation/AnnotationStore.java  |   43 +
+ .../odf/api/annotation/AnnotationStoreUtils.java   |  129 +++
+ .../atlas/odf/api/annotation/Annotations.java      |   31 +
+ .../api/connectivity/DataRetrievalException.java   |   35 +
+ .../odf/api/connectivity/DataSetRetriever.java     |   33 +
+ .../odf/api/connectivity/DataSetRetrieverImpl.java |  298 ++++++
+ .../odf/api/connectivity/JDBCRetrievalResult.java  |   48 +
+ .../odf/api/connectivity/RESTClientManager.java    |   88 ++
+ .../discoveryservice/AnalysisRequestTracker.java   |  130 +++
+ .../api/discoveryservice/DataSetCheckResult.java   |   54 +
+ .../odf/api/discoveryservice/DiscoveryService.java |   42 +
+ .../api/discoveryservice/DiscoveryServiceBase.java |   54 +
+ .../discoveryservice/DiscoveryServiceEndpoint.java |   50 +
+ .../DiscoveryServiceJavaEndpoint.java              |   50 +
+ .../discoveryservice/DiscoveryServiceManager.java  |   93 ++
+ .../DiscoveryServiceProperties.java                |  173 ++++
+ .../DiscoveryServicePropertiesList.java            |   33 +
+ .../discoveryservice/DiscoveryServiceRequest.java  |  179 ++++
+ .../discoveryservice/DiscoveryServiceResponse.java |   62 ++
+ .../discoveryservice/DiscoveryServiceResult.java   |   46 +
+ .../DiscoveryServiceRuntimeStatistics.java         |   39 +
+ .../DiscoveryServiceSparkEndpoint.java             |   79 ++
+ .../discoveryservice/DiscoveryServiceStatus.java   |   62 ++
+ .../discoveryservice/ServiceNotFoundException.java |   38 +
+ .../api/discoveryservice/ServiceStatusCount.java   |   58 ++
+ .../discoveryservice/SyncDiscoveryServiceBase.java |   46 +
+ .../async/AsyncDiscoveryService.java               |   29 +
+ .../async/DiscoveryServiceAsyncRunStatus.java      |   77 ++
+ .../async/DiscoveryServiceAsyncStartResponse.java  |   41 +
+ .../datasets/DataSetContainer.java                 |   52 +
+ .../datasets/MaterializedDataSet.java              |   57 +
+ .../sync/DiscoveryServiceSyncResponse.java         |   40 +
+ .../sync/SyncDiscoveryService.java                 |   33 +
+ .../apache/atlas/odf/api/engine/BrokerNode.java    |   42 +
+ .../apache/atlas/odf/api/engine/EngineManager.java |   76 ++
+ .../KafkaBrokerPartitionMessageCountInfo.java      |   39 +
+ .../atlas/odf/api/engine/KafkaGroupOffsetInfo.java |   45 +
+ .../atlas/odf/api/engine/KafkaPartitionInfo.java   |   45 +
+ .../apache/atlas/odf/api/engine/KafkaStatus.java   |   46 +
+ .../atlas/odf/api/engine/KafkaTopicStatus.java     |   69 ++
+ .../atlas/odf/api/engine/MessagingStatus.java      |   21 +
+ .../atlas/odf/api/engine/ODFEngineOptions.java     |   32 +
+ .../org/apache/atlas/odf/api/engine/ODFStatus.java |   45 +
+ .../apache/atlas/odf/api/engine/ODFVersion.java    |   32 +
+ .../atlas/odf/api/engine/PartitionOffsetInfo.java  |   53 +
+ .../atlas/odf/api/engine/ServiceRuntimeInfo.java   |   36 +
+ .../atlas/odf/api/engine/ServiceRuntimesInfo.java  |   29 +
+ .../apache/atlas/odf/api/engine/SystemHealth.java  |   62 ++
+ .../apache/atlas/odf/api/engine/ThreadStatus.java  |   57 +
+ .../odf/api/metadata/AnnotationPropagator.java     |   31 +
+ .../api/metadata/AtlasMetadataQueryBuilder.java    |   61 ++
+ .../api/metadata/DefaultMetadataQueryBuilder.java  |   69 ++
+ .../atlas/odf/api/metadata/ExternalStore.java      |   44 +
+ .../odf/api/metadata/InternalMetaDataUtils.java    |   88 ++
+ .../api/metadata/InternalMetadataStoreBase.java    |   93 ++
+ .../atlas/odf/api/metadata/InvalidReference.java   |   77 ++
+ .../odf/api/metadata/MetaDataObjectReference.java  |  100 ++
+ .../odf/api/metadata/MetadataQueryBuilder.java     |   92 ++
+ .../atlas/odf/api/metadata/MetadataStore.java      |  173 ++++
+ .../atlas/odf/api/metadata/MetadataStoreBase.java  |  111 ++
+ .../odf/api/metadata/MetadataStoreException.java   |   36 +
+ .../odf/api/metadata/RESTMetadataStoreHelper.java  |   51 +
+ .../atlas/odf/api/metadata/ReferenceCache.java     |   54 +
+ .../odf/api/metadata/RemoteMetadataStore.java      |  385 +++++++
+ .../odf/api/metadata/StoredMetaDataObject.java     |   61 ++
+ .../odf/api/metadata/UnknownMetaDataObject.java    |   22 +
+ .../importer/JDBCMetadataImportResult.java         |   42 +
+ .../metadata/importer/JDBCMetadataImporter.java    |   36 +
+ .../metadata/importer/MetadataImportException.java |   32 +
+ .../atlas/odf/api/metadata/models/Annotation.java  |   61 ++
+ .../odf/api/metadata/models/BusinessTerm.java      |   44 +
+ .../api/metadata/models/CachedMetadataStore.java   |  137 +++
+ .../metadata/models/ClassificationAnnotation.java  |   38 +
+ .../atlas/odf/api/metadata/models/Column.java      |   32 +
+ .../atlas/odf/api/metadata/models/Connection.java  |   18 +
+ .../odf/api/metadata/models/ConnectionInfo.java    |   64 ++
+ .../atlas/odf/api/metadata/models/DataFile.java    |   39 +
+ .../odf/api/metadata/models/DataFileFolder.java    |   18 +
+ .../atlas/odf/api/metadata/models/DataSet.java     |   22 +
+ .../atlas/odf/api/metadata/models/DataStore.java   |   35 +
+ .../atlas/odf/api/metadata/models/Database.java    |   31 +
+ .../atlas/odf/api/metadata/models/Document.java    |   41 +
+ .../odf/api/metadata/models/JDBCConnection.java    |   49 +
+ .../api/metadata/models/JDBCConnectionInfo.java    |   49 +
+ .../odf/api/metadata/models/MetaDataCache.java     |   47 +
+ .../odf/api/metadata/models/MetaDataObject.java    |   96 ++
+ .../api/metadata/models/ProfilingAnnotation.java   |   28 +
+ .../odf/api/metadata/models/RelationalDataSet.java |   24 +
+ .../metadata/models/RelationshipAnnotation.java    |   31 +
+ .../atlas/odf/api/metadata/models/Schema.java      |   18 +
+ .../atlas/odf/api/metadata/models/Table.java       |   23 +
+ .../odf/api/metadata/models/UnknownConnection.java |   21 +
+ .../api/metadata/models/UnknownConnectionInfo.java |   21 +
+ .../odf/api/metadata/models/UnknownDataSet.java    |   22 +
+ .../odf/api/metadata/models/UnknownDataStore.java  |   21 +
+ .../metadata/models/UnknownRelationalDataSet.java  |   21 +
+ .../odf/api/settings/KafkaConsumerConfig.java      |   74 ++
+ .../api/settings/KafkaMessagingConfiguration.java  |   63 ++
+ .../odf/api/settings/MessagingConfiguration.java   |   42 +
+ .../apache/atlas/odf/api/settings/ODFSettings.java |  206 ++++
+ .../atlas/odf/api/settings/SettingsManager.java    |   79 ++
+ .../apache/atlas/odf/api/settings/SparkConfig.java |   52 +
+ .../odf/api/settings/validation/EnumValidator.java |   37 +
+ .../validation/ImplementationValidator.java        |   42 +
+ .../validation/NumberPositiveValidator.java        |   32 +
+ .../api/settings/validation/PropertyValidator.java |   20 +
+ .../validation/StringNotEmptyValidator.java        |   27 +
+ .../settings/validation/ValidationException.java   |   63 ++
+ .../atlas/odf/api/spark/SparkDiscoveryService.java |   34 +
+ .../odf/api/spark/SparkDiscoveryServiceBase.java   |   34 +
+ .../atlas/odf/api/spark/SparkServiceExecutor.java  |   33 +
+ .../org/apache/atlas/odf/api/spark/SparkUtils.java |  308 ++++++
+ .../apache/atlas/odf/api/utils/ODFLogConfig.java   |  114 ++
+ .../atlas/odf/json/AnnotationDeserializer.java     |  165 +++
+ .../atlas/odf/json/AnnotationSerializer.java       |  121 +++
+ .../atlas/odf/json/DefaultODFDeserializer.java     |   69 ++
+ .../java/org/apache/atlas/odf/json/JSONUtils.java  |  254 +++++
+ .../odf/test/json/ODFJSONSerializationTest.java    |  406 ++++++++
+ odf/odf-archetype-discoveryservice/.gitignore      |    4 +
+ odf/odf-archetype-discoveryservice/pom.xml         |   52 +
+ .../main/resources/META-INF/maven/archetype.xml    |   27 +
+ .../src/main/resources/archetype-resources/pom.xml |   42 +
+ .../src/main/java/MyAnnotation.java                |   17 +
+ .../src/main/java/MyDiscoveryService.java          |   33 +
+ .../main/resources/META-INF/odf/odf-services.json  |   11 +
+ .../src/test/java/MyDiscoveryServiceTest.java      |   15 +
+ .../resources/projects/it1/archetype.properties    |   23 +
+ .../src/test/resources/projects/it1/goal.txt       |    1 +
+ odf/odf-atlas/.gitignore                           |    7 +
+ odf/odf-atlas/atlasconfig/jetty-web.xml            |   25 +
+ odf/odf-atlas/atlasconfig/realm.properties         |   24 +
+ odf/odf-atlas/build_atlas.xml                      |  265 +++++
+ odf/odf-atlas/pom.xml                              |  216 ++++
+ .../core/metadata/atlas/AtlasMetadataStore.java    |  842 +++++++++++++++
+ .../odf/core/metadata/atlas/AtlasModelBridge.java  |  409 ++++++++
+ .../metadata/internal/atlas/atlas-odf-model.json   |  444 ++++++++
+ .../internal/atlas/atlas-odf-object-reference.json |   16 +
+ .../internal/atlas/atlas-odf-object-template.json  |   16 +
+ .../internal/atlas/atlas-reference-template.json   |    6 +
+ .../apache/atlas/odf/odf-implementation.properties |   15 +
+ .../core/runtime/ODFFactoryClassesNoMockTest.java  |   50 +
+ odf/odf-core/.gitignore                            |    6 +
+ odf/odf-core/pom.xml                               |  112 ++
+ .../java/org/apache/atlas/odf/core/Encryption.java |   67 ++
+ .../org/apache/atlas/odf/core/Environment.java     |   39 +
+ .../apache/atlas/odf/core/ODFImplementations.java  |   95 ++
+ .../org/apache/atlas/odf/core/ODFInitializer.java  |   97 ++
+ .../apache/atlas/odf/core/ODFInternalFactory.java  |   93 ++
+ .../java/org/apache/atlas/odf/core/ODFUtils.java   |   77 ++
+ .../atlas/odf/core/OpenDiscoveryFrameworkImpl.java |   82 ++
+ .../atlas/odf/core/StandaloneEnvironment.java      |   71 ++
+ .../main/java/org/apache/atlas/odf/core/Utils.java |  314 ++++++
+ .../odf/core/analysis/AnalysisManagerImpl.java     |  177 ++++
+ .../annotation/InternalAnnotationStoreUtils.java   |   48 +
+ .../odf/core/configuration/ConfigContainer.java    |   68 ++
+ .../odf/core/configuration/ConfigManager.java      |  235 +++++
+ .../ODFConfigNotificationPublisher.java            |   45 +
+ .../odf/core/configuration/ServiceValidator.java   |   75 ++
+ .../atlas/odf/core/controlcenter/AdminMessage.java |   60 ++
+ .../core/controlcenter/AdminQueueProcessor.java    |   92 ++
+ .../controlcenter/AnalysisRequestTrackerStore.java |   53 +
+ .../AsyncDiscoveryServiceWrapper.java              |  108 ++
+ .../controlcenter/ConfigChangeQueueProcessor.java  |   45 +
+ .../odf/core/controlcenter/ControlCenter.java      |  454 ++++++++
+ .../controlcenter/DeclarativeRequestMapper.java    |  279 +++++
+ .../controlcenter/DefaultStatusQueueStore.java     |  478 +++++++++
+ .../core/controlcenter/DefaultThreadManager.java   |  276 +++++
+ .../DefaultTransactionContextExecutor.java         |   29 +
+ .../controlcenter/DiscoveryServiceStarter.java     |  303 ++++++
+ .../DiscoveryServiceUnreachableException.java      |   30 +
+ .../core/controlcenter/ExecutorServiceFactory.java |   33 +
+ .../controlcenter/HealthCheckServiceRuntime.java   |   73 ++
+ .../odf/core/controlcenter/JavaServiceRuntime.java |   87 ++
+ .../atlas/odf/core/controlcenter/ODFRunnable.java  |   27 +
+ .../core/controlcenter/QueueMessageProcessor.java  |   32 +
+ .../odf/core/controlcenter/ServiceRuntime.java     |   42 +
+ .../odf/core/controlcenter/ServiceRuntimes.java    |  147 +++
+ .../controlcenter/SparkDiscoveryServiceProxy.java  |  110 ++
+ .../core/controlcenter/SparkServiceRuntime.java    |   58 ++
+ .../odf/core/controlcenter/StatusQueueEntry.java   |   52 +
+ .../odf/core/controlcenter/ThreadManager.java      |   68 ++
+ .../atlas/odf/core/controlcenter/TrackerUtil.java  |   76 ++
+ .../TransactionAsyncDiscoveryServiceProxy.java     |   97 ++
+ .../controlcenter/TransactionContextExecutor.java  |   33 +
+ .../TransactionSyncDiscoveryServiceProxy.java      |   79 ++
+ .../DiscoveryServiceManagerImpl.java               |  258 +++++
+ .../DiscoveryServiceStatistics.java                |   83 ++
+ .../atlas/odf/core/engine/EngineManagerImpl.java   |  221 ++++
+ .../core/messaging/DefaultMessageEncryption.java   |   53 +
+ .../messaging/DiscoveryServiceQueueManager.java    |   39 +
+ .../odf/core/messaging/MessageEncryption.java      |   20 +
+ .../odf/core/metadata/DefaultMetadataStore.java    |  381 +++++++
+ .../core/metadata/JDBCMetadataImporterImpl.java    |  181 ++++
+ .../atlas/odf/core/metadata/SampleDataHelper.java  |   67 ++
+ .../odf/core/metadata/WritableMetadataStore.java   |  111 ++
+ .../core/metadata/WritableMetadataStoreBase.java   |  117 +++
+ .../core/metadata/WritableMetadataStoreUtils.java  |  297 ++++++
+ .../notification/DefaultNotificationManager.java   |   26 +
+ .../core/notification/NotificationListener.java    |   35 +
+ .../odf/core/notification/NotificationManager.java |   26 +
+ .../odf/core/settings/SettingsManagerImpl.java     |  137 +++
+ .../odf/core/store/ODFConfigurationStorage.java    |   31 +
+ .../internal/odf-default-implementation.properties |   30 +
+ .../core/internal/odf-initial-configuration.json   |   28 +
+ .../internal/sampledata/bank-clients-short.csv     |  500 +++++++++
+ .../internal/sampledata/sample-data-toc.properties |   17 +
+ .../sampledata/simple-example-document.txt         |    1 +
+ .../internal/sampledata/simple-example-table.csv   |    4 +
+ .../org/apache/atlas/odf/core/odfversion.txt       |    1 +
+ .../ODFAPITestWithMetadataStoreBase.java           |  136 +++
+ ...PITestWithMetadataStoreExtendedAnnotations.java |   74 ++
+ .../ODFAPITestWithMetadataStoreJsonAnnotation.java |   69 ++
+ .../ODFAPITestWithMetadataStoreSimple.java         |  134 +++
+ .../connectivity/DataSetRetrieverTest.java         |   92 ++
+ .../metadata/MetadataStoreTestBase.java            |  303 ++++++
+ .../metadata/WritableMetadataStoreTest.java        |   24 +
+ .../importer/JDBCMetadataImporterTest.java         |  214 ++++
+ .../spark/SparkDiscoveryServiceLocalTest.java      |  243 +++++
+ .../metadata/models/CachedMetadataStoreTest.java   |   54 +
+ .../odf/core/test/ODFInternalFactoryTest.java      |   58 ++
+ .../apache/atlas/odf/core/test/ODFTestBase.java    |   67 ++
+ .../apache/atlas/odf/core/test/ODFTestLogger.java  |   24 +
+ .../apache/atlas/odf/core/test/ODFTestcase.java    |   27 +
+ .../atlas/odf/core/test/TestEnvironment.java       |   67 ++
+ .../odf/core/test/TestEnvironmentInitializer.java  |   22 +
+ .../apache/atlas/odf/core/test/TimerTestBase.java  |   87 ++
+ .../test/annotation/AnnotationExtensionTest.java   |  114 ++
+ .../core/test/annotation/AnnotationStoreTest.java  |   62 ++
+ .../test/annotation/ExtensionTestAnnotation.java   |   39 +
+ ...DiscoveryServiceWritingExtendedAnnotations.java |  147 +++
+ ...SyncDiscoveryServiceWritingJsonAnnotations.java |   63 ++
+ .../test/configuration/ODFConfigurationTest.java   |  165 +++
+ .../test/configuration/PasswordEncryptionTest.java |   83 ++
+ .../core/test/configuration/ValidationTests.java   |  103 ++
+ .../controlcenter/AnalysisProcessingTests.java     |  139 +++
+ .../AnalysisRequestCancellationTest.java           |  104 ++
+ .../AnalysisRequestTrackerStoreTest.java           |  105 ++
+ .../DeclarativeRequestMapperTest.java              |  158 +++
+ .../controlcenter/DefaultThreadManagerTest.java    |  172 ++++
+ .../odf/core/test/controlcenter/ODFAPITest.java    |  373 +++++++
+ .../core/test/controlcenter/ParallelODFTest.java   |  101 ++
+ .../test/controlcenter/SetTrackerStatusTest.java   |   66 ++
+ .../DiscoveryServiceManagerTest.java               |  135 +++
+ .../TestAsyncDiscoveryService1.java                |  227 ++++
+ ...stAsyncDiscoveryServiceWritingAnnotations1.java |   99 ++
+ .../TestSyncDiscoveryService1.java                 |   61 ++
+ ...estSyncDiscoveryServiceWritingAnnotations1.java |  156 +++
+ .../atlas/odf/core/test/engine/ODFVersionTest.java |   30 +
+ .../atlas/odf/core/test/engine/ShutdownTest.java   |   90 ++
+ .../odf/core/test/messaging/MockQueueManager.java  |  249 +++++
+ .../test/notification/NotificationManagerTest.java |   72 ++
+ .../test/notification/TestNotificationManager.java |   66 ++
+ .../core/test/runtime/RuntimeExtensionTest.java    |  114 ++
+ .../odf/core/test/runtime/TestServiceRuntime.java  |   80 ++
+ .../core/test/spark/MockSparkServiceExecutor.java  |   59 ++
+ .../spark/SimpleSparkDiscoveryServiceTest.java     |   91 ++
+ .../core/test/store/MockConfigurationStorage.java  |   80 ++
+ .../test/resources/META-INF/odf/odf-runtimes.txt   |    1 +
+ .../internal/atlas/nested_annotation_example.json  |  111 ++
+ .../odf/core/test/annotation/annotexttest1.json    |    8 +
+ .../test/internal/odf-initial-configuration.json   |  114 ++
+ .../odf/core/test/messaging/kafka/tracker1.json    |   31 +
+ .../apache/atlas/odf/odf-implementation.properties |   20 +
+ odf/odf-doc/.gitignore                             |    6 +
+ odf/odf-doc/README.txt                             |    3 +
+ odf/odf-doc/pom.xml                                |  163 +++
+ odf/odf-doc/src/main/webapp/WEB-INF/web.xml        |   21 +
+ odf/odf-doc/src/site/markdown/api-reference.md     |    5 +
+ odf/odf-doc/src/site/markdown/build.md             |   99 ++
+ odf/odf-doc/src/site/markdown/configuration.md     |    1 +
+ odf/odf-doc/src/site/markdown/data-model.md        |  106 ++
+ .../site/markdown/discovery-service-tutorial.md    |  143 +++
+ .../src/site/markdown/discovery-services.md        |    1 +
+ odf/odf-doc/src/site/markdown/examples.md          |    1 +
+ .../src/site/markdown/first-analysis-tutorial.md   |    3 +
+ odf/odf-doc/src/site/markdown/first-steps.md       |   65 ++
+ odf/odf-doc/src/site/markdown/index.md             |   12 +
+ odf/odf-doc/src/site/markdown/install.md           |  137 +++
+ odf/odf-doc/src/site/markdown/jenkins-build.md     |   81 ++
+ odf/odf-doc/src/site/markdown/odf-metadata-api.md  |   49 +
+ odf/odf-doc/src/site/markdown/operations.md        |    1 +
+ .../markdown/spark-discovery-service-tutorial.md   |  192 ++++
+ odf/odf-doc/src/site/markdown/test-env.md          |   71 ++
+ odf/odf-doc/src/site/markdown/troubleshooting.md   |  112 ++
+ .../odf-tutorial-discoveryservice/pom.xml          |   44 +
+ .../ODFTutorialAnnotation.java                     |   33 +
+ .../ODFTutorialDiscoveryService.java               |   46 +
+ .../main/resources/META-INF/odf/odf-services.json  |   13 +
+ .../ODFTutorialDiscoveryServiceTest.java           |   29 +
+ odf/odf-doc/src/site/site.xml                      |   62 ++
+ odf/odf-messaging/.gitignore                       |    6 +
+ odf/odf-messaging/pom.xml                          |  208 ++++
+ .../odf/core/messaging/kafka/KafkaMonitor.java     |  545 ++++++++++
+ .../core/messaging/kafka/KafkaProducerManager.java |  105 ++
+ .../core/messaging/kafka/KafkaQueueConsumer.java   |  233 +++++
+ .../core/messaging/kafka/KafkaQueueManager.java    |  488 +++++++++
+ .../core/messaging/kafka/KafkaRuntimeConsumer.java |  104 ++
+ .../messaging/kafka/MessageSearchConsumer.java     |  224 ++++
+ .../apache/atlas/odf/odf-implementation.properties |   14 +
+ .../kafka/KafkaQueueConsumerExceptionTest.java     |  137 +++
+ .../messaging/kafka/KafkaQueueManagerTest.java     |  303 ++++++
+ .../messaging/kafka/MessageSearchConsumerTest.java |  193 ++++
+ .../kafka/MultiPartitionConsumerTest.java          |  314 ++++++
+ .../messaging/kafka/ParallelServiceErrorTest.java  |   99 ++
+ .../test/messaging/kafka/ParallelServiceTest.java  |  100 ++
+ .../kafka/TestEnvironmentMessagingInitializer.java |   49 +
+ .../test/messaging/kafka/TestKafkaStarter.java     |  306 ++++++
+ .../messaging/kafka/test-embedded-kafka.properties |  136 +++
+ .../kafka/test-embedded-zookeeper.properties       |   34 +
+ .../apache/atlas/odf/odf-implementation.properties |   18 +
+ odf/odf-spark-example-application/.gitignore       |    7 +
+ odf/odf-spark-example-application/pom.xml          |   74 ++
+ .../core/spark/SparkDiscoveryServiceExample.java   |   57 +
+ .../atlas/odf/core/spark/SummaryStatistics.java    |  112 ++
+ odf/odf-spark/.gitignore                           |    6 +
+ odf/odf-spark/pom.xml                              |  242 +++++
+ .../odf/core/spark/LocalSparkServiceExecutor.java  |  154 +++
+ .../org/apache/atlas/odf/core/spark/SparkJars.java |  107 ++
+ .../odf/core/spark/SparkServiceExecutorImpl.java   |  102 ++
+ .../apache/atlas/odf/odf-implementation.properties |   14 +
+ odf/odf-store/.gitignore                           |    5 +
+ odf/odf-store/pom.xml                              |   87 ++
+ .../zookeeper34/ZookeeperConfigurationStorage.java |  247 +++++
+ .../zookeeper/test-embedded-zookeeper.properties   |   34 +
+ .../apache/atlas/odf/odf-implementation.properties |   14 +
+ .../core/store/zookeeper34/test/TestZookeeper.java |  181 ++++
+ .../test/ZookeeperConfigurationStorageTest.java    |   54 +
+ .../apache/atlas/odf/odf-implementation.properties |   16 +
+ odf/odf-test-env/.gitignore                        |    5 +
+ odf/odf-test-env/pom.xml                           |  142 +++
+ odf/odf-test-env/prepare_components.xml            |  169 +++
+ odf/odf-test-env/src/assembly/bin.xml              |   73 ++
+ odf/odf-test-env/src/main/config/server.properties |  134 +++
+ .../src/main/config/zookeeper.properties           |   33 +
+ odf/odf-test-env/src/main/scripts/clean_atlas.bat  |   22 +
+ odf/odf-test-env/src/main/scripts/clean_atlas.sh   |   22 +
+ .../src/main/scripts/deploy-odf-war.bat            |   24 +
+ .../src/main/scripts/deploy-odf-war.sh             |   21 +
+ .../main/scripts/download-install-odf-testenv.sh   |   73 ++
+ .../src/main/scripts/jenkins-manage-testenv.sh     |   69 ++
+ odf/odf-test-env/src/main/scripts/odftestenv.sh    |  232 +++++
+ .../src/main/scripts/start-odf-testenv.bat         |   57 +
+ .../src/main/scripts/start-odf-testenv.sh          |   53 +
+ .../src/main/scripts/stop-odf-testenv.sh           |   16 +
+ odf/odf-web/.gitignore                             |   11 +
+ odf/odf-web/download_swagger-ui.xml                |   63 ++
+ odf/odf-web/package.json                           |   30 +
+ odf/odf-web/pom.xml                                |  441 ++++++++
+ .../apache/atlas/odf/admin/log/LoggingHandler.java |   71 ++
+ .../apache/atlas/odf/admin/rest/ODFAdminApp.java   |   50 +
+ .../org/apache/atlas/odf/admin/rest/RestUtils.java |   48 +
+ .../odf/admin/rest/resources/AnalysesResource.java |  156 +++
+ .../admin/rest/resources/AnnotationsResource.java  |  130 +++
+ .../rest/resources/DiscoveryServicesResource.java  |  341 ++++++
+ .../odf/admin/rest/resources/EngineResource.java   |  167 +++
+ .../odf/admin/rest/resources/ImportResource.java   |   89 ++
+ .../odf/admin/rest/resources/MetadataResource.java |  246 +++++
+ .../odf/admin/rest/resources/SettingsResource.java |  128 +++
+ .../org/apache/atlas/odf/images/activity_32.png    |  Bin 0 -> 322 bytes
+ .../apache/atlas/odf/images/applications_32.png    |  Bin 0 -> 467 bytes
+ .../org/apache/atlas/odf/images/bar-chart_32.png   |  Bin 0 -> 224 bytes
+ .../org/apache/atlas/odf/images/world_32.png       |  Bin 0 -> 562 bytes
+ odf/odf-web/src/main/webapp/.gitignore             |    6 +
+ odf/odf-web/src/main/webapp/WEB-INF/web.xml        |   52 +
+ odf/odf-web/src/main/webapp/client_index.html      |   14 +
+ odf/odf-web/src/main/webapp/img/lg_proc.gif        |  Bin 0 -> 17230 bytes
+ odf/odf-web/src/main/webapp/index.html             |   14 +
+ odf/odf-web/src/main/webapp/odf-config.js          |   15 +
+ .../main/webapp/scripts/odf-analysis-request.js    |  473 +++++++++
+ odf/odf-web/src/main/webapp/scripts/odf-client.js  | 1087 ++++++++++++++++++++
+ .../main/webapp/scripts/odf-configuration-store.js |   63 ++
+ odf/odf-web/src/main/webapp/scripts/odf-console.js |  967 +++++++++++++++++
+ odf/odf-web/src/main/webapp/scripts/odf-globals.js |   54 +
+ odf/odf-web/src/main/webapp/scripts/odf-logs.js    |   83 ++
+ .../main/webapp/scripts/odf-metadata-browser.js    |  661 ++++++++++++
+ odf/odf-web/src/main/webapp/scripts/odf-mixins.js  |   51 +
+ .../src/main/webapp/scripts/odf-notifications.js   |  171 +++
+ .../src/main/webapp/scripts/odf-request-browser.js |  154 +++
+ .../src/main/webapp/scripts/odf-services.js        |  251 +++++
+ .../src/main/webapp/scripts/odf-settings.js        |  552 ++++++++++
+ .../src/main/webapp/scripts/odf-statistics.js      |  413 ++++++++
+ odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js |  316 ++++++
+ odf/odf-web/src/main/webapp/scripts/odf-utils.js   |  338 ++++++
+ odf/odf-web/src/main/webapp/swagger/index.html     |  125 +++
+ .../integrationtest/admin/EngineResourceTest.java  |   79 ++
+ .../admin/SettingsResourceTest.java                |   97 ++
+ .../analysis/test/ODFVersionTest.java              |   47 +
+ .../annotations/AnnotationsResourceTest.java       |  174 ++++
+ .../metadata/MetadataResourceTest.java             |   81 ++
+ .../metadata/RemoteMetadataStoreTest.java          |   97 ++
+ .../spark/SparkDiscoveryServiceWebTest.java        |  133 +++
+ .../apache/atlas/odf/rest/test/RestTestBase.java   |  289 ++++++
+ odf/odf-web/webpack.config.js                      |   65 ++
+ odf/pom.xml                                        |  133 +++
+ odf/prepare_embedded_jetty.xml                     |   90 ++
+ 412 files changed, 41551 insertions(+)
+ create mode 100755 odf/README.md
+ create mode 100755 odf/jettyconfig/jetty-https.xml
+ create mode 100755 odf/jettyconfig/jetty-ssl.xml
+ create mode 100755 odf/jettyconfig/jetty.xml
+ create mode 100755 odf/jettyconfig/realm.properties
+ create mode 100755 odf/odf-api/.gitignore
+ create mode 100755 odf/odf-api/pom.xml
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java
+ create mode 100755 odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java
+ create mode 100755 odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java
+ create mode 100755 odf/odf-archetype-discoveryservice/.gitignore
+ create mode 100755 odf/odf-archetype-discoveryservice/pom.xml
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json
+ create mode 100755 odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java
+ create mode 100755 odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties
+ create mode 100755 odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt
+ create mode 100755 odf/odf-atlas/.gitignore
+ create mode 100755 odf/odf-atlas/atlasconfig/jetty-web.xml
+ create mode 100755 odf/odf-atlas/atlasconfig/realm.properties
+ create mode 100755 odf/odf-atlas/build_atlas.xml
+ create mode 100755 odf/odf-atlas/pom.xml
+ create mode 100755 odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java
+ create mode 100755 odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java
+ create mode 100755 odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
+ create mode 100755 odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json
+ create mode 100755 odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json
+ create mode 100755 odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json
+ create mode 100755 odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java
+ create mode 100755 odf/odf-core/.gitignore
+ create mode 100755 odf/odf-core/pom.xml
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java
+ create mode 100755 odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv
+ create mode 100755 odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java
+ create mode 100755 odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java
+ create mode 100755 odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt
+ create mode 100755 odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json
+ create mode 100755 odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json
+ create mode 100755 odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json
+ create mode 100755 odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json
+ create mode 100755 odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-doc/.gitignore
+ create mode 100755 odf/odf-doc/README.txt
+ create mode 100755 odf/odf-doc/pom.xml
+ create mode 100755 odf/odf-doc/src/main/webapp/WEB-INF/web.xml
+ create mode 100755 odf/odf-doc/src/site/markdown/api-reference.md
+ create mode 100755 odf/odf-doc/src/site/markdown/build.md
+ create mode 100755 odf/odf-doc/src/site/markdown/configuration.md
+ create mode 100755 odf/odf-doc/src/site/markdown/data-model.md
+ create mode 100755 odf/odf-doc/src/site/markdown/discovery-service-tutorial.md
+ create mode 100755 odf/odf-doc/src/site/markdown/discovery-services.md
+ create mode 100755 odf/odf-doc/src/site/markdown/examples.md
+ create mode 100755 odf/odf-doc/src/site/markdown/first-analysis-tutorial.md
+ create mode 100755 odf/odf-doc/src/site/markdown/first-steps.md
+ create mode 100755 odf/odf-doc/src/site/markdown/index.md
+ create mode 100755 odf/odf-doc/src/site/markdown/install.md
+ create mode 100755 odf/odf-doc/src/site/markdown/jenkins-build.md
+ create mode 100755 odf/odf-doc/src/site/markdown/odf-metadata-api.md
+ create mode 100755 odf/odf-doc/src/site/markdown/operations.md
+ create mode 100755 odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md
+ create mode 100755 odf/odf-doc/src/site/markdown/test-env.md
+ create mode 100755 odf/odf-doc/src/site/markdown/troubleshooting.md
+ create mode 100755 odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml
+ create mode 100755 odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java
+ create mode 100755 odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java
+ create mode 100755 odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json
+ create mode 100755 odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java
+ create mode 100755 odf/odf-doc/src/site/site.xml
+ create mode 100755 odf/odf-messaging/.gitignore
+ create mode 100755 odf/odf-messaging/pom.xml
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java
+ create mode 100755 odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java
+ create mode 100755 odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java
+ create mode 100755 odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java
+ create mode 100755 odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties
+ create mode 100755 odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties
+ create mode 100755 odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-spark-example-application/.gitignore
+ create mode 100755 odf/odf-spark-example-application/pom.xml
+ create mode 100755 odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java
+ create mode 100755 odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java
+ create mode 100755 odf/odf-spark/.gitignore
+ create mode 100755 odf/odf-spark/pom.xml
+ create mode 100755 odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java
+ create mode 100755 odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java
+ create mode 100755 odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java
+ create mode 100755 odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-store/.gitignore
+ create mode 100755 odf/odf-store/pom.xml
+ create mode 100755 odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java
+ create mode 100755 odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties
+ create mode 100755 odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
+ create mode 100755 odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
+ create mode 100755 odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+ create mode 100755 odf/odf-test-env/.gitignore
+ create mode 100755 odf/odf-test-env/pom.xml
+ create mode 100755 odf/odf-test-env/prepare_components.xml
+ create mode 100755 odf/odf-test-env/src/assembly/bin.xml
+ create mode 100755 odf/odf-test-env/src/main/config/server.properties
+ create mode 100755 odf/odf-test-env/src/main/config/zookeeper.properties
+ create mode 100755 odf/odf-test-env/src/main/scripts/clean_atlas.bat
+ create mode 100755 odf/odf-test-env/src/main/scripts/clean_atlas.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
+ create mode 100755 odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/odftestenv.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
+ create mode 100755 odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
+ create mode 100755 odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
+ create mode 100755 odf/odf-web/.gitignore
+ create mode 100755 odf/odf-web/download_swagger-ui.xml
+ create mode 100755 odf/odf-web/package.json
+ create mode 100755 odf/odf-web/pom.xml
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java
+ create mode 100755 odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java
+ create mode 100755 odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png
+ create mode 100755 odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png
+ create mode 100755 odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png
+ create mode 100755 odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png
+ create mode 100755 odf/odf-web/src/main/webapp/.gitignore
+ create mode 100755 odf/odf-web/src/main/webapp/WEB-INF/web.xml
+ create mode 100755 odf/odf-web/src/main/webapp/client_index.html
+ create mode 100755 odf/odf-web/src/main/webapp/img/lg_proc.gif
+ create mode 100755 odf/odf-web/src/main/webapp/index.html
+ create mode 100755 odf/odf-web/src/main/webapp/odf-config.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-client.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-console.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-globals.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-logs.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-mixins.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-notifications.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-request-browser.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-services.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-settings.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-statistics.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js
+ create mode 100755 odf/odf-web/src/main/webapp/scripts/odf-utils.js
+ create mode 100755 odf/odf-web/src/main/webapp/swagger/index.html
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java
+ create mode 100755 odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java
+ create mode 100755 odf/odf-web/webpack.config.js
+ create mode 100755 odf/pom.xml
+ create mode 100755 odf/prepare_embedded_jetty.xml
+
+diff --git a/odf/README.md b/odf/README.md
+new file mode 100755
+index 0000000..23157b2
+--- /dev/null
++++ b/odf/README.md
+@@ -0,0 +1,6 @@
++Open Discovery Framework
++==========================
++
++The Open Discovery Framework (ODF) is an open metadata-based framework that strives to be a common home for different analytics technologies that discover characteristics of data sets and relationships between them (think "AppStore for discovery algorithms"). Using ODF, applications can leverage new discovery algorithms and their results with minimal integration effort.
++
++See [here](odf-doc/src/site/markdown/build.md) for instructions on how to build and deploy ODF.
+diff --git a/odf/jettyconfig/jetty-https.xml b/odf/jettyconfig/jetty-https.xml
+new file mode 100755
+index 0000000..283e511
+--- /dev/null
++++ b/odf/jettyconfig/jetty-https.xml
+@@ -0,0 +1,63 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
++<Configure id="Server" class="org.eclipse.jetty.server.Server">
++	<Call id="httpsConnector" name="addConnector">
++		<Arg>
++			<New class="org.eclipse.jetty.server.ServerConnector">
++				<Arg name="server">
++					<Ref refid="Server" />
++				</Arg>
++				<Arg name="factories">
++					<Array type="org.eclipse.jetty.server.ConnectionFactory">
++						<Item>
++							<New class="org.eclipse.jetty.server.SslConnectionFactory">
++								<Arg name="next">http/1.1</Arg>
++								<Arg name="sslContextFactory">
++									<Ref refid="sslContextFactory" />
++								</Arg>
++							</New>
++						</Item>
++						<Item>
++							<New class="org.eclipse.jetty.server.HttpConnectionFactory">
++								<Arg name="config">
++									<Ref refid="sslHttpConfig" />
++								</Arg>
++							</New>
++						</Item>
++					</Array>
++				</Arg>
++				<Set name="host">
++					<Property name="jetty.host" />
++				</Set>
++				<Set name="port">
++					<Property name="jetty.maven.plugin.port" default="58080" />
++				</Set>
++				<Set name="idleTimeout">
++					<Property name="https.timeout" default="30000" />
++				</Set>
++			</New>
++		</Arg>
++	</Call>
++	<Call name="addBean">
++		<Arg>
++			<New class="org.eclipse.jetty.security.HashLoginService">
++				<Set name="name">ODF Realm</Set>
++				<Set name="config"><Property name="jetty.config.dir" default="../target/jettyconfig" />/realm.properties</Set>
++			</New>
++		</Arg>
++	</Call>
++</Configure>
+diff --git a/odf/jettyconfig/jetty-ssl.xml b/odf/jettyconfig/jetty-ssl.xml
+new file mode 100755
+index 0000000..fb5b5e3
+--- /dev/null
++++ b/odf/jettyconfig/jetty-ssl.xml
+@@ -0,0 +1,45 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
++<Configure id="sslContextFactory" class="org.eclipse.jetty.util.ssl.SslContextFactory">
++	<Set name="KeyStorePath"><Property name="jetty.config.dir" default="../target/jettyconfig" />/keystore.jks</Set>
++	<Set name="KeyStorePassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
++	<Set name="KeyManagerPassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
++	<Set name="TrustStorePath"><Property name="jetty.config.dir" default="../target/jettyconfig" />/keystore.jks</Set>
++	<Set name="TrustStorePassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
++	<Set name="EndpointIdentificationAlgorithm"></Set>
++	<Set name="ExcludeCipherSuites">
++		<Array type="String">
++			<Item>SSL_RSA_WITH_DES_CBC_SHA</Item>
++			<Item>SSL_DHE_RSA_WITH_DES_CBC_SHA</Item>
++			<Item>SSL_DHE_DSS_WITH_DES_CBC_SHA</Item>
++			<Item>SSL_RSA_EXPORT_WITH_RC4_40_MD5</Item>
++			<Item>SSL_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
++			<Item>SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
++			<Item>SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA</Item>
++		</Array>
++	</Set>
++	<New id="sslHttpConfig" class="org.eclipse.jetty.server.HttpConfiguration">
++		<Arg>
++			<Ref refid="httpConfig" />
++		</Arg>
++		<Call name="addCustomizer">
++			<Arg>
++				<New class="org.eclipse.jetty.server.SecureRequestCustomizer" />
++			</Arg>
++		</Call>
++	</New>
++</Configure>
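+
+Note: the SSL context factory above expects a Java keystore at <jetty.config.dir>/keystore.jks whose passwords
+match the OBF: values. A hypothetical example of creating a self-signed keystore with the JDK keytool (alias and
+validity are arbitrary placeholders; keytool prompts for the password and certificate details):
+
+    keytool -genkeypair -alias odf -keyalg RSA -keysize 2048 -validity 365 -keystore keystore.jks
+
+The plain-text password can then be obfuscated with the org.eclipse.jetty.util.security.Password class as
+described in jettyconfig/realm.properties later in this patch.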
+diff --git a/odf/jettyconfig/jetty.xml b/odf/jettyconfig/jetty.xml
+new file mode 100755
+index 0000000..c754b48
+--- /dev/null
++++ b/odf/jettyconfig/jetty.xml
+@@ -0,0 +1,28 @@
++<?xml version="1.0"?>
++<!--
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
++<!-- ============================================================= -->
++<!-- Configure the Http Configuration -->
++<!-- ============================================================= -->
++<Configure id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration">
++	<Set name="secureScheme">https</Set>
++	<Set name="securePort"><Property name="jetty.maven.plugin.port" default="58080" /></Set>
++	<Set name="outputBufferSize">32768</Set>
++	<Set name="requestHeaderSize">8192</Set>
++	<Set name="responseHeaderSize">8192</Set>
++	<Set name="sendServerVersion">true</Set>
++	<Set name="sendDateHeader">false</Set>
++	<Set name="headerCacheSize">512</Set>
++</Configure>
+diff --git a/odf/jettyconfig/realm.properties b/odf/jettyconfig/realm.properties
+new file mode 100755
+index 0000000..109d726
+--- /dev/null
++++ b/odf/jettyconfig/realm.properties
+@@ -0,0 +1,24 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# Credentials for ODF basic authentication
++#
++# Format:
++# <username>: <password>[,<rolename> ...]
++#
++# Passwords are stored in obfuscated format.
++# Re-generate a password using the org.eclipse.jetty.util.security.Password class in the jetty lib folder.
++# Example:
++# cd jetty-distribution-<version>/lib
++# java -cp jetty-util-<version>.jar org.eclipse.jetty.util.security.Password <plain password>
++sdp: OBF:1ugg1sov1xfd1k8k1wn31k5m1xfp1sov1uha,user
+diff --git a/odf/odf-api/.gitignore b/odf/odf-api/.gitignore
+new file mode 100755
+index 0000000..ea5ddb8
+--- /dev/null
++++ b/odf/odf-api/.gitignore
+@@ -0,0 +1,5 @@
++.settings
++target
++.classpath
++.project
++.factorypath
+\ No newline at end of file
+diff --git a/odf/odf-api/pom.xml b/odf/odf-api/pom.xml
+new file mode 100755
+index 0000000..5c8258d
+--- /dev/null
++++ b/odf/odf-api/pom.xml
+@@ -0,0 +1,100 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-api</artifactId>
++	<properties>
++		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
++	</properties>
++	<dependencies>
++		<dependency>
++			<groupId>com.fasterxml.jackson.core</groupId>
++			<artifactId>jackson-annotations</artifactId>
++			<version>${jackson.version}</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>com.fasterxml.jackson.core</groupId>
++			<artifactId>jackson-databind</artifactId>
++			<version>${jackson.version}</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.wink</groupId>
++			<artifactId>wink-json4j</artifactId>
++			<version>1.4</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.commons</groupId>
++			<artifactId>commons-csv</artifactId>
++			<version>1.2</version>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.hamcrest</groupId>
++			<artifactId>hamcrest-all</artifactId>
++			<version>1.3</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.httpcomponents</groupId>
++			<artifactId>fluent-hc</artifactId>
++			<version>4.5.1</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<artifactId>swagger-jaxrs</artifactId>
++			<version>1.5.9</version>
++			<groupId>io.swagger</groupId>
++			<scope>compile</scope>
++		</dependency>
++		<!-- The following dependencies are required by Spark Discovery Services only and are provided by the Spark cluster -->
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-core_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-sql_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++	</dependencies>
++</project>
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java
+new file mode 100755
+index 0000000..20676b4
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java
+@@ -0,0 +1,41 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api;
++
++import java.text.MessageFormat;
++
++public class ODFFactory {
++
++	private final static String ODF_DEFAULT_IMPLEMENTATION = "org.apache.atlas.odf.core.OpenDiscoveryFrameworkImpl";
++
++	public OpenDiscoveryFramework create() {
++		Object o = null;
++		Class<?> clazz;
++		try {
++			clazz = this.getClass().getClassLoader().loadClass(ODF_DEFAULT_IMPLEMENTATION);
++		} catch (ClassNotFoundException e) {
++			throw new RuntimeException(MessageFormat.format("Class {0} was not found. Make sure that the odf-core jar and all its dependencies are available on the classpath.", ODF_DEFAULT_IMPLEMENTATION));
++		}
++		try {
++			o = clazz.newInstance();
++		} catch (InstantiationException | IllegalAccessException e) {
++			throw new RuntimeException(MessageFormat.format("Class {0} was found on the classpath but could not be instantiated or accessed.", ODF_DEFAULT_IMPLEMENTATION));
++		}
++		if (o instanceof OpenDiscoveryFramework) {
++			return (OpenDiscoveryFramework) o;
++		} else {
++			throw new RuntimeException(MessageFormat.format("The class {0} on the classpath is not of type OpenDiscoveryFramework.", ODF_DEFAULT_IMPLEMENTATION));
++		}
++	}
++}
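+
+A minimal sketch of bootstrapping ODF through this factory; the class name OdfBootstrap is made up, and the
+odf-core jar plus its dependencies must be on the classpath for the reflective lookup above to succeed:
+
+    import org.apache.atlas.odf.api.ODFFactory;
+    import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+
+    public class OdfBootstrap {
+        public static void main(String[] args) {
+            // throws a RuntimeException if odf-core is not on the classpath
+            OpenDiscoveryFramework odf = new ODFFactory().create();
+            System.out.println("Loaded ODF implementation: " + odf.getClass().getName());
+        }
+    }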
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java
+new file mode 100755
+index 0000000..70ab91b
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java
+@@ -0,0 +1,79 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api;
++
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.engine.EngineManager;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++
++/**
++*
++* External Java API for managing and controlling ODF
++*
++*/
++public interface OpenDiscoveryFramework {
++
++	/**
++	 * Returns API for managing ODF analysis requests
++	 *
++	 * @return ODF analysis manager API
++	 */
++	public AnalysisManager getAnalysisManager();
++
++	/**
++	 * Returns API for managing ODF discovery services
++	 *
++	 * @return ODF discovery services manager API
++	 */
++	public DiscoveryServiceManager getDiscoveryServiceManager();
++
++	/**
++	 * Returns API for controlling the ODF engine
++	 *
++	 * @return ODF engine manager API
++	 */
++	public EngineManager getEngineManager();
++
++	/**
++	 * Returns API for managing ODF settings
++	 *
++	 * @return ODF settings manager API
++	 */
++	public SettingsManager getSettingsManager();
++
++	/**
++	 * Returns ODF annotation store API
++	 *
++	 * @return ODF annotation store API
++	 */
++	public AnnotationStore getAnnotationStore();
++
++	/**
++	 * Returns ODF metadata store API
++	 *
++	 * @return ODF metadata store API
++	 */
++	public MetadataStore getMetadataStore();
++
++	/**
++	 * Returns JDBC importer utility for populating the metadata store with sample data
++	 *
++	 * @return ODF JDBC importer utility
++	 */
++	public JDBCMetadataImporter getJDBCMetadataImporter();
++}
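+
+A sketch of navigating the facade to its sub-APIs (method names as declared above; variable names and the
+class name FacadeTour are illustrative):
+
+    import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+    import org.apache.atlas.odf.api.analysis.AnalysisManager;
+    import org.apache.atlas.odf.api.annotation.AnnotationStore;
+    import org.apache.atlas.odf.api.settings.SettingsManager;
+
+    class FacadeTour {
+        static void tour(OpenDiscoveryFramework odf) {
+            AnalysisManager analyses = odf.getAnalysisManager();     // submit and track analysis requests
+            SettingsManager settings = odf.getSettingsManager();     // read and change ODF settings
+            AnnotationStore annotations = odf.getAnnotationStore();  // read and write analysis results
+        }
+    }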
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java
+new file mode 100755
+index 0000000..cd294c5
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java
+@@ -0,0 +1,34 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++public class AnalysisCancelResult {
++
++	public enum State {
++		NOT_FOUND,
++		INVALID_STATE,
++		SUCCESS
++	}
++
++	private State state;
++
++	public State getState() {
++		return state;
++	}
++
++	public void setState(State state) {
++		this.state = state;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java
+new file mode 100755
+index 0000000..6ff6098
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java
+@@ -0,0 +1,62 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++/**
++ *
++ * External interface for creating and managing analysis requests
++ *
++ */
++public interface AnalysisManager {
++
++	/**
++	 * Issues a new ODF analysis request
++	 *
++	 * @param request Analysis request
++	 * @return Response containing the request id and status information
++	 */
++	public AnalysisResponse runAnalysis(AnalysisRequest request);
++
++	/**
++	 * Retrieve status of an ODF analysis request
++	 *
++	 * @param requestId Unique id of the analysis request
++	 * @return Status of the analysis request
++	 */
++	public AnalysisRequestStatus getAnalysisRequestStatus(String requestId);
++
++	/**
++	 * Retrieve statistics about all previous ODF analysis requests
++	 *
++	 * @return Request summary
++	 */
++	public AnalysisRequestSummary getAnalysisStats();
++
++	/**
++	 * Retrieve status details of recent ODF analysis requests
++	 *
++	 * @param offset Starting offset (use 0 to start with the latest request)
++	 * @param limit Maximum number of analysis requests to be returned (use -1 to retrieve all requests)
++	 * @return Status details for each analysis request
++	 */
++	public AnalysisRequestTrackers getAnalysisRequests(int offset, int limit);
++
++	/**
++	 * Request cancellation of a specific ODF analysis request
++	 *
++	 * @param requestId Unique id of the analysis request
++	 * @return Status of the cancellation attempt
++	 */
++	public AnalysisCancelResult cancelAnalysisRequest(String requestId);
++}
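+
+A sketch of submitting and cancelling a request via this interface; the request is assumed to be populated as
+described for AnalysisRequest below, and the class name SubmitAndCancel is made up:
+
+    import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+    import org.apache.atlas.odf.api.analysis.AnalysisManager;
+    import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+    import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+
+    class SubmitAndCancel {
+        static void run(AnalysisManager analysisManager, AnalysisRequest request) {
+            AnalysisResponse response = analysisManager.runAnalysis(request);
+            if (response.isInvalidRequest()) {
+                throw new IllegalArgumentException("Invalid request: " + response.getDetails());
+            }
+            // cancellation only succeeds while the request has not finished yet
+            AnalysisCancelResult cancel = analysisManager.cancelAnalysisRequest(response.getId());
+            if (cancel.getState() != AnalysisCancelResult.State.SUCCESS) {
+                System.out.println("Request could not be cancelled: " + cancel.getState());
+            }
+        }
+    }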
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java
+new file mode 100755
+index 0000000..3aa5937
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java
+@@ -0,0 +1,108 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++@ApiModel(description="Request for starting a discovery service.")
++public class AnalysisRequest {
++
++	// only used when returned by the ODF
++	@ApiModelProperty(value="Unique request id (generated)", readOnly=true, required=false)
++	private String id;
++
++	@ApiModelProperty(value="Data set to be analyzed (currently limited to a single data set)", required=true)
++	private List<MetaDataObjectReference> dataSets = new ArrayList<>();
++
++	@ApiModelProperty(value="Sequence of ids (or a single id) of the discovery services to be invoked", required=false)
++	private List<String> discoveryServiceSequence = new ArrayList<String>();
++
++	@ApiModelProperty(value="List of annotation types to be created on the data set(s)", required=false)
++	private List<String> annotationTypes = new ArrayList<String>();
++
++	@ApiModelProperty(value="Optional additional properties map to be passed to the discovery service(s)", required=false)
++	private Map<String, Object> additionalProperties = new HashMap<String, Object>();
++
++	@ApiModelProperty(value="Indicates that multiple data sets should be processed sequentially rather than in parallel", required=false)
++	private boolean processDataSetsSequentially = true;
++
++	// if false, the request will fail if some discovery service cannot process one of the data sets
++	@ApiModelProperty(value="Indicates that access to the data set should not be checked before starting the discovery service", required=false)
++	private boolean ignoreDataSetCheck = false;
++
++	public List<MetaDataObjectReference> getDataSets() {
++		return dataSets;
++	}
++
++	public void setDataSets(List<MetaDataObjectReference> dataSets) {
++		this.dataSets = dataSets;
++	}
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public List<String> getDiscoveryServiceSequence() {
++		return discoveryServiceSequence;
++	}
++
++	public void setDiscoveryServiceSequence(List<String> discoveryServiceSequence) {
++		this.discoveryServiceSequence = discoveryServiceSequence;
++	}
++
++	public List<String> getAnnotationTypes() {
++		return annotationTypes;
++	}
++
++	public void setAnnotationTypes(List<String> annotationTypes) {
++		this.annotationTypes = annotationTypes;
++	}
++
++	public Map<String, Object> getAdditionalProperties() {
++		return additionalProperties;
++	}
++
++	public void setAdditionalProperties(Map<String, Object> additionalProperties) {
++		this.additionalProperties = additionalProperties;
++	}
++
++	public boolean isProcessDataSetsSequentially() {
++		return processDataSetsSequentially;
++	}
++
++	public void setProcessDataSetsSequentially(boolean processDataSetsSequentially) {
++		this.processDataSetsSequentially = processDataSetsSequentially;
++	}
++
++	public boolean isIgnoreDataSetCheck() {
++		return ignoreDataSetCheck;
++	}
++
++	public void setIgnoreDataSetCheck(boolean ignoreDataSetCheck) {
++		this.ignoreDataSetCheck = ignoreDataSetCheck;
++	}
++}
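+
+A sketch of populating a request using the setters above; the data set reference is assumed to come from the
+metadata store, and the service id "MyDiscoveryService" is a placeholder that must match a registered
+discovery service:
+
+    import java.util.Arrays;
+
+    import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+    import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+    class RequestBuilder {
+        static AnalysisRequest build(MetaDataObjectReference dataSetRef) {
+            AnalysisRequest request = new AnalysisRequest();
+            request.setDataSets(Arrays.asList(dataSetRef));
+            request.setDiscoveryServiceSequence(Arrays.asList("MyDiscoveryService"));
+            request.setIgnoreDataSetCheck(false); // keep the pre-flight data set check enabled
+            return request;
+        }
+    }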
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java
+new file mode 100755
+index 0000000..b6a120e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java
+@@ -0,0 +1,124 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++@ApiModel(description="Status of a specific analysis request.")
++public class AnalysisRequestStatus {
++
++	public static enum State {
++		ACTIVE, // some discovery service is processing the request 
++		QUEUED, // in the queue for some discovery service
++		ERROR, // something went wrong 
++		FINISHED, // processing finished successfully 
++		NOT_FOUND, // request ID was not found
++		CANCELLED // request was cancelled by the user
++	}
++
++	@ApiModelProperty(value="Analysis request that was submitted", readOnly=true, required=true)
++	private AnalysisRequest request;
++
++	@ApiModelProperty(value="Status of the request", readOnly=true, required=true)
++	private State state;
++
++	@ApiModelProperty(value="Detailed status description", readOnly=true, required=false)
++	private String details;
++
++	@ApiModelProperty(value="Indicates whether an equivalent request was found", readOnly=true, required=true)
++	private boolean foundExistingRequest = false;
++
++	@ApiModelProperty(value="List of individual discovery service requests that make up the analysis request", readOnly=true, required=true)
++	private List<DiscoveryServiceRequest> serviceRequests;
++
++	@ApiModelProperty(value="Total time the request was queued in milliseconds", readOnly=true, required=true)
++	private long totalTimeOnQueues;
++
++	@ApiModelProperty(value="Total time needed for processing the analysis request in milliseconds", readOnly=true, required=true)
++	private long totalTimeProcessing;
++
++	@ApiModelProperty(value="Total time needed for storing the annotations in the metadata repository in milliseconds", readOnly=true, required=true)
++	private long totalTimeStoringAnnotations;
++
++	public AnalysisRequest getRequest() {
++		return request;
++	}
++
++	public void setRequest(AnalysisRequest request) {
++		this.request = request;
++	}
++
++	public State getState() {
++		return state;
++	}
++
++	public void setState(State state) {
++		this.state = state;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++	public boolean isFoundExistingRequest() {
++		return foundExistingRequest;
++	}
++
++	public void setFoundExistingRequest(boolean foundExistingRequest) {
++		this.foundExistingRequest = foundExistingRequest;
++	}
++
++	public List<DiscoveryServiceRequest> getServiceRequests() {
++		return serviceRequests;
++	}
++
++	public void setServiceRequests(List<DiscoveryServiceRequest> requests) {
++		this.serviceRequests = requests;
++	}
++
++	public long getTotalTimeOnQueues() {
++		return totalTimeOnQueues;
++	}
++
++	public void setTotalTimeOnQueues(long totalTimeOnQueues) {
++		this.totalTimeOnQueues = totalTimeOnQueues;
++	}
++
++	public long getTotalTimeProcessing() {
++		return totalTimeProcessing;
++	}
++
++	public void setTotalTimeProcessing(long totalTimeProcessing) {
++		this.totalTimeProcessing = totalTimeProcessing;
++	}
++
++	public long getTotalTimeStoringAnnotations() {
++		return totalTimeStoringAnnotations;
++	}
++
++	public void setTotalTimeStoringAnnotations(long totalTimeStoringAnnotations) {
++		this.totalTimeStoringAnnotations = totalTimeStoringAnnotations;
++	}
++
++}
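+
+A sketch of polling this status until a terminal state is reached; the one-second interval and the class name
+StatusPoller are arbitrary:
+
+    import org.apache.atlas.odf.api.analysis.AnalysisManager;
+    import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+
+    class StatusPoller {
+        static AnalysisRequestStatus.State waitForCompletion(AnalysisManager analysisManager, String requestId)
+                throws InterruptedException {
+            while (true) {
+                AnalysisRequestStatus status = analysisManager.getAnalysisRequestStatus(requestId);
+                switch (status.getState()) {
+                case FINISHED:
+                case ERROR:
+                case CANCELLED:
+                case NOT_FOUND:
+                    return status.getState(); // terminal states
+                default:
+                    Thread.sleep(1000);       // ACTIVE or QUEUED: keep polling
+                }
+            }
+        }
+    }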
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java
+new file mode 100755
+index 0000000..a7982ef
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java
+@@ -0,0 +1,52 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Status summary of all analysis requests submitted since last start of ODF.")
++public class AnalysisRequestSummary {
++
++	@ApiModelProperty(value="Number of successful analysis requests", readOnly=true, required=true)
++	private int success;
++
++	@ApiModelProperty(value="Number of failing analysis requests", readOnly=true, required=true)
++	private int failure;
++
++	AnalysisRequestSummary() {
++	}
++
++	public AnalysisRequestSummary(int success, int failure) {
++		this.success = success;
++		this.failure = failure;
++	}
++	
++	public int getSuccess() {
++		return this.success;
++	}
++
++	public void setSuccess(int success) {
++		this.success = success;
++	}
++
++	public int getFailure() {
++		return this.failure;
++	}
++
++	public void setFailure(int failure) {
++		this.failure = failure;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java
+new file mode 100755
+index 0000000..49ca9f7
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java
+@@ -0,0 +1,25 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++public class AnalysisRequestTrackerStatus {
++	public static enum STATUS {
++		INITIALIZED, //tracker was created, nothing else happened so far
++		IN_DISCOVERY_SERVICE_QUEUE, //tracker is put on queue but not running yet
++		DISCOVERY_SERVICE_RUNNING, //only for async services, analysis is running
++		FINISHED, //analysis finished
++		ERROR, // an error occurred during analysis / processing
++		CANCELLED //the analysis was cancelled by the user
++	};
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java
+new file mode 100755
+index 0000000..846ed3d
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java
+@@ -0,0 +1,36 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Container object tracking the status of multiple analysis requests.")
++public class AnalysisRequestTrackers {
++
++	@ApiModelProperty(value="List of container objects tracking the status of analysis requests", required=true)
++	private List<AnalysisRequestTracker> analysisRequestTrackers;
++
++	public List<AnalysisRequestTracker> getAnalysisRequestTrackers() {
++		return this.analysisRequestTrackers;
++	}
++
++	public void setAnalysisRequestTrackers(List<AnalysisRequestTracker> analysisRequestTrackers) {
++		this.analysisRequestTrackers = analysisRequestTrackers;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java
+new file mode 100755
+index 0000000..9f1e45c
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java
+@@ -0,0 +1,66 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.analysis;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++@ApiModel(description="Response returned by the analysis request.")
++public class AnalysisResponse {
++	@ApiModelProperty(value="Unique request id", readOnly=true, required=true)
++	private String id;
++
++	@ApiModelProperty(value="Original request that is equivalent to the submitted one, which is therefore skipped", readOnly=true, required=true)
++	private AnalysisRequest originalRequest;
++
++	private boolean isInvalidRequest = false;
++
++	@ApiModelProperty(value="Details about why the request is invalid", readOnly=true, required=false)
++	private String details;
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public AnalysisRequest getOriginalRequest() {
++		return originalRequest;
++	}
++
++	public void setOriginalRequest(AnalysisRequest originalRequest) {
++		this.originalRequest = originalRequest;
++	}
++
++	@ApiModelProperty(name="isInvalidRequest", value="Indicates whether the submitted request is invalid", readOnly=true, required=true)
++	public boolean isInvalidRequest() {
++		return isInvalidRequest;
++	}
++
++	public void setInvalidRequest(boolean isInvalidRequest) {
++		this.isInvalidRequest = isInvalidRequest;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java
+new file mode 100755
+index 0000000..7e08d74
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java
+@@ -0,0 +1,43 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.annotation;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.ExternalStore;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++
++public interface AnnotationStore extends ExternalStore {
++
++	/**
++	 * @return the reference to the object that was created or updated
++	 */
++	MetaDataObjectReference store(Annotation annotation);
++	
++	/**
++	 * Get all annotations attached to the metadata object for a specific analysis request.
++	 * Pass null as analysisRequestId to retrieve the annotations of all analysis requests.
++	 */
++	List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId);
++	
++	/**
++	 * Retrieve an annotation by ID
++	 */
++	Annotation retrieveAnnotation(MetaDataObjectReference ref);
++	
++	/// internal
++	void setAnalysisRun(String analysisRun);
++
++	String getAnalysisRun();
++};
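+
+A minimal round trip through the store, assuming the caller has already populated the annotation object and
+that AnnotationRoundTrip is a made-up helper class:
+
+    import org.apache.atlas.odf.api.annotation.AnnotationStore;
+    import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+    import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+    class AnnotationRoundTrip {
+        static Annotation storeAndReload(AnnotationStore store, Annotation annotation) {
+            MetaDataObjectReference ref = store.store(annotation);
+            return store.retrieveAnnotation(ref); // read the annotation back by its new reference
+        }
+    }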
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java
+new file mode 100755
+index 0000000..e51faf6
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java
+@@ -0,0 +1,129 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.annotation;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++
++public class AnnotationStoreUtils {
++
++	/**
++	 * Return the most recent annotations for the passed object, but at most one per annotation type.
++	 * Note that this might not be suitable for the semantics represented by some annotation types. 
++	 */
++	public static List<Annotation> getMostRecentAnnotationsByType(AnnotationStore as, MetaDataObjectReference object) {
++		try {
++			// Fix issue 99: only return one annotation per type
++			Map<String, Annotation> mostRecentAnnotationsByType = new HashMap<>();
++			Map<String, Long> typeToMaxTimestamp = new HashMap<>();
++			for (Annotation annot : as.getAnnotations(object, null)) {
++				Long ts = getTimestamp(annot);
++				String annotType = annot.getAnnotationType();
++				Long l = typeToMaxTimestamp.get(annotType);
++				if (l == null) {
++					l = ts;
++				}
++				if (l <= ts) {
++					typeToMaxTimestamp.put(annotType, Long.valueOf(ts));
++					mostRecentAnnotationsByType.put(annotType, annot);
++				}
++			}
++			return new ArrayList<>(mostRecentAnnotationsByType.values());
++		} catch (Exception exc) {
++			throw new MetadataStoreException(exc);
++		}
++	}
++	
++	private static long getTimestamp(Annotation annot) {
++		final long defaultVal = -1;
++		String runId = annot.getAnalysisRun();
++		int ix = runId.lastIndexOf("_");
++		if (ix == -1) {
++			return defaultVal;
++		}
++		// skip the separator itself: analysis run ids are expected to end with "_<millis>"
++		String millis = runId.substring(ix + 1);
++		long result = defaultVal;
++		try {
++			result = Long.valueOf(millis);
++		} catch (NumberFormatException e) {
++			return defaultVal;
++		}
++		return result;
++	}
++
++	/**
++	 * Retrieve the annotations of a metadata object from a given annotation store, restricted to one
++	 * annotation type and to annotations created by a specific analysis request.
++	 *
++	 * @param mdo metadata object whose annotations are retrieved
++	 * @param store annotation store to read from
++	 * @param annotationType annotation type to filter by
++	 * @param requestId id of the analysis request that created the annotations
++	 * @return List of annotations of the given type created on 'mdo' by the request with the ID 'requestId'
++	 */
++	public static List<Annotation> retrieveAnnotationsOfRun(MetaDataObject mdo, AnnotationStore store, String annotationType, String requestId) {
++		Logger logger = Logger.getLogger(AnnotationStoreUtils.class.getName());
++		List<Annotation> annotations = new ArrayList<>();
++		for (Annotation annot : store.getAnnotations(mdo.getReference(), null)) {
++			logger.log(Level.FINER, "Found annotation on object {0} with analysis run {1} and annotationType {2}",
++					new Object[] { mdo.getReference().getId(), annot.getAnalysisRun(), annot.getAnnotationType() });
++			if (annot.getAnalysisRun().equals(requestId) && annot.getAnnotationType().equals(annotationType)) {
++				annotations.add(annot);
++			}
++		}
++		return annotations;
++	}
++
++	/**
++	 * For a given annotation, return the reference to the annotated object. Throws a MetadataStoreException if this reference is null.
++	 *
++	 * @param annot annotation whose annotated object is to be resolved
++	 * @return Metadata reference to the object annotated by 'annot'
++	 */
++	public static MetaDataObjectReference getAnnotatedObject(Annotation annot) {
++		MetaDataObjectReference annotRef = null;
++		if (annot instanceof ProfilingAnnotation) {
++			annotRef = ((ProfilingAnnotation) annot).getProfiledObject();
++		} else if (annot instanceof ClassificationAnnotation) {
++			annotRef = ((ClassificationAnnotation) annot).getClassifiedObject();
++		} else if (annot instanceof RelationshipAnnotation) {
++			List<MetaDataObjectReference> refs = ((RelationshipAnnotation) annot).getRelatedObjects();
++			if (refs != null && refs.size() > 0) {
++				annotRef = refs.get(0);
++			}
++		}
++		if (annotRef == null) {
++			String errorMessage = MessageFormat.format("The annotated object of annotation with ID ''{0}'' is null.", annot.getReference().getId());
++			throw new MetadataStoreException(errorMessage);
++		}
++		return annotRef;
++	}
++	
++}
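+
+A sketch of listing the latest annotation per type for an object using the utility above; the class name
+LatestAnnotations is illustrative:
+
+    import java.util.List;
+
+    import org.apache.atlas.odf.api.annotation.AnnotationStore;
+    import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
+    import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+    import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+    class LatestAnnotations {
+        static void print(AnnotationStore store, MetaDataObjectReference object) {
+            List<Annotation> latest = AnnotationStoreUtils.getMostRecentAnnotationsByType(store, object);
+            for (Annotation annotation : latest) {
+                System.out.println(annotation.getAnnotationType() + " from run " + annotation.getAnalysisRun());
+            }
+        }
+    }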
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java
+new file mode 100755
+index 0000000..058b472
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java
+@@ -0,0 +1,31 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.annotation;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++
++public class Annotations {
++	private List<Annotation> annotations;
++
++	public List<Annotation> getAnnotations() {
++		return annotations;
++	}
++
++	public void setAnnotations(List<Annotation> annotations) {
++		this.annotations = annotations;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java
+new file mode 100755
+index 0000000..c67ae97
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java
+@@ -0,0 +1,35 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.connectivity;
++
++public class DataRetrievalException extends RuntimeException {
++
++	/**
++	 * 
++	 */
++	private static final long serialVersionUID = 4978058839277657L;
++
++	public DataRetrievalException() {
++		super();
++	}
++
++	public DataRetrievalException(String message) {
++		super(message);
++	}
++
++	public DataRetrievalException(Throwable cause) {
++		super(cause);
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java
+new file mode 100755
+index 0000000..8dc2579
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.connectivity;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
++
++public interface DataSetRetriever {
++
++	/** Set the metadata store used to resolve data set references. */
++	void setMetadataStore(MetadataStore mds);
++
++	/** Check whether the actual data behind the given data set reference can be accessed. */
++	boolean canRetrieveDataSet(DataSet dataSet);
++
++	/** Materialize a relational data set (data file or table) into an in-memory representation. */
++	MaterializedDataSet retrieveRelationalDataSet(RelationalDataSet relationalDataSet) throws DataRetrievalException;
++
++	/** Write the contents of a relational data set to a CSV file with the given name. */
++	void createCsvFile(RelationalDataSet relationalDataSet, String fileName) throws DataRetrievalException;
++
++	/** Open a JDBC connection and prepared statement for retrieving the given table's contents. */
++	JDBCRetrievalResult retrieveTableAsJDBCResultSet(Table table) throws DataRetrievalException;
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java
+new file mode 100755
+index 0000000..6846ba5
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java
+@@ -0,0 +1,298 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.connectivity;
++
++import java.io.File;
++import java.io.PrintWriter;
++import java.net.URL;
++import java.nio.charset.Charset;
++import java.sql.Connection;
++import java.sql.DriverManager;
++import java.sql.PreparedStatement;
++import java.sql.ResultSet;
++import java.sql.ResultSetMetaData;
++import java.sql.SQLException;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.commons.csv.CSVFormat;
++import org.apache.commons.csv.CSVParser;
++import org.apache.commons.csv.CSVRecord;
++
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
++
++/**
++ * This class is a helper that retrieves the actual data behind a metadata object referencing a data set (e.g. a data file or a relational table).
++ *
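++ * <p>
++ * A minimal usage sketch ({@code metadataStore} and {@code table} are hypothetical
++ * and assumed to come from the caller's environment):
++ * <pre>{@code
++ * DataSetRetriever retriever = new DataSetRetrieverImpl(metadataStore);
++ * if (retriever.canRetrieveDataSet(table)) {
++ *     MaterializedDataSet mds = retriever.retrieveRelationalDataSet(table);
++ * }
++ * }</pre>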
++ */
++public class DataSetRetrieverImpl implements DataSetRetriever {
++
++	Logger logger = Logger.getLogger(DataSetRetrieverImpl.class.getName());
++	MetadataStore metaDataStore;
++
++	public DataSetRetrieverImpl() {
++	}
++
++	public DataSetRetrieverImpl(MetadataStore metaDataStore) {
++		this.metaDataStore = metaDataStore;
++	}
++
++	@Override
++	public void setMetadataStore(MetadataStore mds) {
++		this.metaDataStore = mds;
++	}
++
++	@Override
++	public boolean canRetrieveDataSet(DataSet dataSet) {
++		if (dataSet instanceof DataFile) {
++			DataFile dataFile = (DataFile) dataSet;
++			return getValidURL(dataFile) != null;
++		} else if (dataSet instanceof Table) {
++			Connection connection = getJDBCConnection((JDBCConnectionInfo) metaDataStore.getConnectionInfo(dataSet));
++			if (connection != null) {
++				try {
++					connection.close();
++					return true;
++				} catch (SQLException e) {
++					// do nothing
++				}
++			}
++		}
++		return false;
++	}
++
++	@Override
++	public MaterializedDataSet retrieveRelationalDataSet(RelationalDataSet relationalDataSet) {
++		if (relationalDataSet instanceof DataFile) {
++			return retrieveDataFile((DataFile) relationalDataSet);
++		} else if (relationalDataSet instanceof Table) {
++			return retrieveTableWithJDBC((Table) relationalDataSet);
++		}
++		return null;
++	}
++
++	@Override
++	public void createCsvFile(RelationalDataSet relationalDataSet, String fileName) {
++		try {
++			logger.log(Level.INFO, "Creating CSV input data file {0}", fileName);
++			MaterializedDataSet mds = retrieveRelationalDataSet(relationalDataSet);
++			int columnCount = mds.getColumns().size();
++			// try-with-resources ensures the writer is closed even if writing a row fails
++			try (PrintWriter printWriter = new PrintWriter(new File(fileName), "UTF-8")) {
++				String headers = quoteForCsv(mds.getColumns().get(0).getName());
++				for (int i = 1; i < columnCount; i++) {
++					headers += "," + quoteForCsv(mds.getColumns().get(i).getName());
++				}
++				printWriter.println(headers);
++				for (int i = 0; i < mds.getData().size(); i++) {
++					String row = quoteForCsv(mds.getData().get(i).get(0).toString());
++					for (int j = 1; j < columnCount; j++) {
++						row += "," + quoteForCsv(mds.getData().get(i).get(j).toString());
++					}
++					printWriter.println(row);
++				}
++			}
++		} catch (Exception exc) {
++			throw new DataRetrievalException(exc);
++		}
++	}
++
++	// Quote a single CSV value and escape embedded double quotes by doubling them.
++	private static String quoteForCsv(String value) {
++		return "\"" + value.replace("\"", "\"\"") + "\"";
++	}
++
++	private URL getValidURL(DataFile dataFile) {
++		try {
++			Charset.forName(dataFile.getEncoding());
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, MessageFormat.format("Encoding ''{0}'' of data file ''{1}'' is not valid", new Object[] { dataFile.getEncoding(), dataFile.getUrlString() }), exc);
++			return null;
++		}
++		String urlString = dataFile.getUrlString();
++		try {
++			URL url = new URL(urlString);
++			url.openConnection().connect();
++			return url;
++		} catch (Exception exc) {
++			String msg = MessageFormat.format("Could not connect to data file URL ''{0}''. Error: {1}", new Object[] { urlString, exc.getMessage() });
++			logger.log(Level.WARNING, msg, exc);
++			return null;
++		}
++	}
++
++	private MaterializedDataSet retrieveDataFile(DataFile dataFile) throws DataRetrievalException {
++		URL url = this.getValidURL(dataFile);
++		if (url == null) {
++			return null;
++		}
++		List<Column> columns = metaDataStore.getColumns(dataFile);
++		List<List<Object>> data = new ArrayList<>();
++
++		// try-with-resources ensures the parser is closed even if reading fails
++		try (CSVParser csvParser = CSVParser.parse(url, Charset.forName(dataFile.getEncoding()), CSVFormat.DEFAULT.withHeader())) {
++			List<CSVRecord> records = csvParser.getRecords();
++			Map<String, Integer> headerMap = csvParser.getHeaderMap();
++
++			for (CSVRecord record : records) {
++				List<Object> targetRecord = new ArrayList<>();
++				for (int i = 0; i < columns.size(); i++) {
++					Column col = columns.get(i);
++					String value = record.get(headerMap.get(col.getName()));
++					Object convertedValue = value;
++					// null-safe comparison: the data type may not be set on the column
++					if ("int".equals(col.getDataType())) {
++						convertedValue = Integer.parseInt(value);
++					} else if ("double".equals(col.getDataType())) {
++						convertedValue = Double.parseDouble(value);
++					}
++					// TODO add more conversions
++					targetRecord.add(convertedValue);
++				}
++				data.add(targetRecord);
++			}
++
++		} catch (Exception exc) {
++			throw new DataRetrievalException(exc);
++		}
++
++		MaterializedDataSet materializedDS = new MaterializedDataSet();
++		materializedDS.setTable(dataFile);
++		materializedDS.setColumns(columns);
++		materializedDS.setData(data);
++		return materializedDS;
++	}
++
++	public static String quoteForJDBC(String s) {
++		// TODO implement to prevent SQL injection
++		return s;
++	}
++
++	Connection getJDBCConnection(JDBCConnectionInfo connectionInfo) {
++		if ((connectionInfo.getConnections() == null) || connectionInfo.getConnections().isEmpty()) {
++			return null;
++		}
++		JDBCConnection connectionObject = (JDBCConnection) connectionInfo.getConnections().get(0); // Use first connection
++		try {
++			return DriverManager.getConnection(connectionObject.getJdbcConnectionString(), connectionObject.getUser(), connectionObject.getPassword());
++		} catch (SQLException exc) {
++			logger.log(Level.WARNING, MessageFormat.format("JDBC connection to ''{0}'' for table ''{1}'' could not be created", new Object[] { connectionObject.getJdbcConnectionString(),
++					connectionInfo.getSchemaName() + "." + connectionInfo.getTableName() }), exc);
++			return null;
++		}
++	}
++
++	private MaterializedDataSet retrieveTableWithJDBC(Table table) {
++
++		JDBCRetrievalResult jdbcRetrievalResult = this.retrieveTableAsJDBCResultSet(table);
++		if (jdbcRetrievalResult == null) {
++			logger.log(Level.FINE, "JDBC retrieval result for table ''{0}'' is null", table.getReference().getUrl());
++			return null;
++		}
++
++		Map<String, Column> columnMap = new HashMap<String, Column>();
++		for (Column column : metaDataStore.getColumns(table)) {
++			columnMap.put(column.getName(), column);
++		}
++		logger.log(Level.INFO, "Table columns: {0}", columnMap.keySet());
++
++		ResultSet rs = null;
++		try {
++			logger.log(Level.FINE, "Executing prepared statement {0}", jdbcRetrievalResult.getPreparedStatement());
++			rs = jdbcRetrievalResult.getPreparedStatement().executeQuery();
++			ResultSetMetaData rsmd = rs.getMetaData();
++			List<Column> resultSetColumns = new ArrayList<>();
++			int columnCount = rsmd.getColumnCount();
++			for (int i = 1; i <= columnCount; i++) {
++				Column col = new Column();
++				col.setName(rsmd.getColumnName(i));
++				col.setDataType(rsmd.getColumnTypeName(i));
++				Column retrievedColumn = columnMap.get(col.getName());
++				if (retrievedColumn != null && retrievedColumn.getReference() != null) {
++					col.setReference(retrievedColumn.getReference());
++				} else {
++					logger.log(Level.WARNING, "No metadata reference found for column ''{0}''; this can cause issues when annotations are created on the column", col.getName());
++				}
++				resultSetColumns.add(col);
++			}
++
++			List<List<Object>> data = new ArrayList<>();
++			while (rs.next()) {
++				List<Object> row = new ArrayList<>();
++				for (int i = 1; i <= columnCount; i++) {
++					row.add(rs.getObject(i));
++				}
++				data.add(row);
++			}
++
++			MaterializedDataSet result = new MaterializedDataSet();
++			result.setTable(table);
++			result.setColumns(resultSetColumns);
++			result.setData(data);
++			return result;
++		} catch (SQLException exc) {
++			throw new DataRetrievalException(exc);
++		} finally {
++			try {
++				if (rs != null) {
++					rs.close();
++				}
++				jdbcRetrievalResult.close();
++			} catch (SQLException exc) {
++				// log only: throwing here would mask an exception thrown from the try block
++				logger.log(Level.WARNING, "Error closing JDBC resources", exc);
++			}
++		}
++
++	}
++
++	@Override
++	public JDBCRetrievalResult retrieveTableAsJDBCResultSet(Table table) {
++		JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) this.metaDataStore.getConnectionInfo(table);
++		Connection connection = null;
++		PreparedStatement stat = null;
++		try {
++			connection = this.getJDBCConnection(connectionInfo);
++			if (connection == null) {
++				logger.log(Level.FINE, "No JDBC connection found for table ''{0}'' (''{1}'')", new Object[]{table.getName(), table.getReference().getUrl()});
++				return null;
++			}
++			String schemaName = connectionInfo.getSchemaName();
++			String sql = "select * from " + quoteForJDBC(schemaName) + "." + quoteForJDBC(table.getName());
++			logger.log(Level.FINER, "Running JDBC statement: ''{0}''", sql);
++			stat = connection.prepareStatement(sql);
++			return new JDBCRetrievalResult(connection, stat);
++		} catch (SQLException exc) {
++			String msg = MessageFormat.format("An SQL exception occurred when preparing data access for table ''{0}'' ({1})", new Object[]{table.getName(), table.getReference().getUrl()});
++			logger.log(Level.WARNING, msg, exc);
++			try {
++				if (connection != null) {
++					connection.close();
++				}
++			} catch (SQLException exc2) {
++				// log the close failure but propagate the original exception below
++				logger.log(Level.WARNING, msg, exc2);
++			}
++			throw new DataRetrievalException(exc);
++		}
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java
+new file mode 100755
+index 0000000..b418eb0
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java
+@@ -0,0 +1,48 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.connectivity;
++
++import java.sql.Connection;
++import java.sql.PreparedStatement;
++import java.sql.SQLException;
++
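++/**
++ * Holds the JDBC connection and prepared statement created for retrieving a table's
++ * contents. A minimal usage sketch ({@code retriever} and {@code table} are hypothetical
++ * and assumed to come from the caller's context):
++ * <pre>{@code
++ * JDBCRetrievalResult result = retriever.retrieveTableAsJDBCResultSet(table);
++ * try {
++ *     ResultSet rs = result.getPreparedStatement().executeQuery();
++ *     // ... consume the result set ...
++ * } finally {
++ *     result.close(); // closes both the statement and the connection
++ * }
++ * }</pre>
++ */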
++public class JDBCRetrievalResult {
++
++	private Connection connection;
++	private PreparedStatement preparedStatement;
++
++	public JDBCRetrievalResult(Connection connection, PreparedStatement preparedStatement) {
++		super();
++		this.connection = connection;
++		this.preparedStatement = preparedStatement;
++	}
++
++	public Connection getConnection() {
++		return connection;
++	}
++
++	public PreparedStatement getPreparedStatement() {
++		return preparedStatement;
++	}
++
++	public void close() throws SQLException {
++		if (preparedStatement != null) {
++			preparedStatement.close();
++		}
++		if (connection != null) {
++			connection.close();
++		}
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java
+new file mode 100755
+index 0000000..7946084
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java
+@@ -0,0 +1,88 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.connectivity;
++
++import java.net.URI;
++import java.security.GeneralSecurityException;
++import java.security.cert.X509Certificate;
++import java.util.logging.Logger;
++
++import javax.net.ssl.SSLContext;
++
++import org.apache.http.auth.UsernamePasswordCredentials;
++import org.apache.http.client.HttpClient;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.conn.ssl.NoopHostnameVerifier;
++import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
++import org.apache.http.impl.client.HttpClientBuilder;
++import org.apache.http.ssl.SSLContextBuilder;
++import org.apache.http.ssl.TrustStrategy;
++
++/**
++ * 
++ * This is a helper class for authenticating HTTP requests.
++ *
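++ * <p>
++ * A minimal usage sketch using the fluent HttpClient {@code Request} API
++ * (URL and credentials are hypothetical):
++ * <pre>{@code
++ * RESTClientManager client = new RESTClientManager(
++ *     new URI("https://localhost:8443/odf"), "odfuser", "odfpassword");
++ * Executor executor = client.getAuthenticatedExecutor();
++ * String result = executor.execute(Request.Get("https://localhost:8443/odf/api"))
++ *     .returnContent().asString();
++ * }</pre>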
++ */
++public class RESTClientManager {
++
++	Logger logger = Logger.getLogger(RESTClientManager.class.getName());
++
++	private Executor executor = null;
++
++	private URI baseUrl;
++	private String user;
++	private String password;
++
++	public RESTClientManager(URI baseUrl, String user, String password) {
++		this.baseUrl = baseUrl;
++		this.user = user;
++		this.password = password;
++	}
++
++	public RESTClientManager(URI baseUrl) {
++		this(baseUrl, null, null);
++	}
++
++	public Executor getAuthenticatedExecutor() throws GeneralSecurityException {
++		if (executor != null) {
++			return executor;
++		}
++		// TODO: accept any certificate for now; proper certificate validation should be added in the future
++		TrustStrategy acceptAllTrustStrategy = new TrustStrategy() {
++			@Override
++			public boolean isTrusted(X509Certificate[] certificate, String authType) {
++				return true;
++			}
++		};
++		SSLContextBuilder contextBuilder = new SSLContextBuilder();
++		SSLContext context = contextBuilder.loadTrustMaterial(null, acceptAllTrustStrategy).build();
++		SSLConnectionSocketFactory scsf = new SSLConnectionSocketFactory(context, new NoopHostnameVerifier());
++
++		HttpClient httpClient = HttpClientBuilder.create() //
++				.setSSLSocketFactory(scsf) //
++				.build();
++
++		if (this.user != null) {
++			if (this.baseUrl == null) {
++				executor = Executor.newInstance(httpClient).auth(new UsernamePasswordCredentials(this.user, this.password));
++			} else {
++				executor = Executor.newInstance(httpClient).auth(this.baseUrl.getHost(), new UsernamePasswordCredentials(this.user, this.password));
++			}
++		} else {
++			executor = Executor.newInstance(httpClient);
++		}
++		return executor;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java
+new file mode 100755
+index 0000000..23ef661
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java
+@@ -0,0 +1,130 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus;
++
++// JSON
++@ApiModel(description="Container for tracking the status of an analysis request.")
++public class AnalysisRequestTracker {
++
++	@ApiModelProperty(value="Analysis request", required=true)
++	private AnalysisRequest request;
++
++	@ApiModelProperty(value="List of discovery service requests that make up the analysis request", required=true)
++	private List<DiscoveryServiceRequest> discoveryServiceRequests = new ArrayList<DiscoveryServiceRequest>();
++
++	@ApiModelProperty(value="List of responses, one for each discovery service request", required=true)
++	private List<DiscoveryServiceResponse> discoveryServiceResponses = new ArrayList<DiscoveryServiceResponse>();
++
++	@ApiModelProperty(value="Status of the analysis request", required=true)
++	private AnalysisRequestTrackerStatus.STATUS status = AnalysisRequestTrackerStatus.STATUS.INITIALIZED;
++
++	@ApiModelProperty(value="Detailed status of the analysis request", required=false)
++	private String statusDetails;
++
++	@ApiModelProperty(value="Timestamp of last status update", required=true)
++	private long lastModified;
++
++	@ApiModelProperty(value="User who has submitted the analysis request", required=true)
++	private String user;
++
++	// A tracker object is used to publish changes across all ODF nodes. When writing a tracker to the queue,
++	// a revision is added so that the ODF instance that wrote the tracker can tell when it has been stored successfully.
++	// This is necessary to make storing these trackers a synchronous operation.
++	@ApiModelProperty(value="Internal revision id of the analysis request", required=true)
++	private String revisionId;
++
++	@ApiModelProperty(value="Next discovery service request to be issued")
++	private int nextDiscoveryServiceRequest = 0;
++
++	public String getUser() {
++		return user;
++	}
++
++	public void setUser(String user) {
++		this.user = user;
++	}
++
++	public long getLastModified() {
++		return lastModified;
++	}
++
++	public void setLastModified(long lastModified) {
++		this.lastModified = lastModified;
++	}
++
++	public List<DiscoveryServiceRequest> getDiscoveryServiceRequests() {
++		return discoveryServiceRequests;
++	}
++
++	public void setDiscoveryServiceRequests(List<DiscoveryServiceRequest> discoveryServiceRequests) {
++		this.discoveryServiceRequests = discoveryServiceRequests;
++	}
++
++	public int getNextDiscoveryServiceRequest() {
++		return nextDiscoveryServiceRequest;
++	}
++
++	public void setNextDiscoveryServiceRequest(int nextDiscoveryServiceRequest) {
++		this.nextDiscoveryServiceRequest = nextDiscoveryServiceRequest;
++	}
++
++	public AnalysisRequest getRequest() {
++		return request;
++	}
++
++	public void setRequest(AnalysisRequest request) {
++		this.request = request;
++	}
++
++	public AnalysisRequestTrackerStatus.STATUS getStatus() {
++		return status;
++	}
++
++	public void setStatus(AnalysisRequestTrackerStatus.STATUS status) {
++		this.status = status;
++	}
++
++	public String getStatusDetails() {
++		return statusDetails;
++	}
++
++	public void setStatusDetails(String statusDetails) {
++		this.statusDetails = statusDetails;
++	}
++
++	public List<DiscoveryServiceResponse> getDiscoveryServiceResponses() {
++		return discoveryServiceResponses;
++	}
++
++	public void setDiscoveryServiceResponses(List<DiscoveryServiceResponse> discoveryServiceResponses) {
++		this.discoveryServiceResponses = discoveryServiceResponses;
++	}
++
++	public String getRevisionId() {
++		return revisionId;
++	}
++
++	public void setRevisionId(String revisionId) {
++		this.revisionId = revisionId;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java
+new file mode 100755
+index 0000000..3e46e83
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java
+@@ -0,0 +1,54 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * 
++ * An object of this class must be returned by a service's checkDataSet method.
++ *
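++ * <p>
++ * A minimal sketch of rejecting a data set (the reason text is hypothetical):
++ * <pre>{@code
++ * DataSetCheckResult result = new DataSetCheckResult();
++ * result.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
++ * result.setDetails("Data set type is not supported by this service");
++ * return result;
++ * }</pre>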
++ */
++@ApiModel(description="Result returned by REST-based discovery services that indicates whether a dataset can be processed by the service.")
++public class DataSetCheckResult {
++
++	public static enum DataAccess {
++		NotPossible,
++		Possible
++	};
++
++	@ApiModelProperty(value="Indicates whether a dataset can be accessed by a discovery service, i.e. whether access is possible or not", readOnly=true, required=true)
++	private DataAccess dataAccess = DataAccess.Possible;
++
++	@ApiModelProperty(value="Message explaining why access to the dataset is not possible", readOnly=true)
++	private String details;
++
++	public DataAccess getDataAccess() {
++		return dataAccess;
++	}
++
++	public void setDataAccess(DataAccess dataAccess) {
++		this.dataAccess = dataAccess;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java
+new file mode 100755
+index 0000000..99366e7
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.concurrent.ExecutorService;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++
++/**
++ * Every kind of discovery service must implement this interface.
++ * For Java services, the executor service can be used to start and manage threads with the credentials of the current ODF user.
++ * The metadata store can be used to access metadata required by the service.
++ *
++ */
++public interface DiscoveryService {
++
++	void setExecutorService(ExecutorService executorService);
++	
++	void setMetadataStore(MetadataStore metadataStore);
++	void setAnnotationStore(AnnotationStore annotationStore);
++
++    /**
++     * Checks whether a data set can be processed by the discovery service.
++     * 
++     * @param dataSetContainer Data set container that contains a reference to the data set to be accessed
++     * @return Status information whether access to the data set is possible or not
++     */
++	DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer);
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java
+new file mode 100755
+index 0000000..db73966
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java
+@@ -0,0 +1,54 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.concurrent.ExecutorService;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++
++/**
++ * A convenience base class that discovery services can subclass. It stores the
++ * executor service, metadata store, and annotation store, and by default reports
++ * that any data set can be accessed.
++ */
++public abstract class DiscoveryServiceBase implements DiscoveryService {
++	protected ExecutorService executorService;
++	protected MetadataStore metadataStore;
++	protected AnnotationStore annotationStore;
++
++	@Override
++	public void setExecutorService(ExecutorService executorService) {
++		this.executorService = executorService;
++	}
++
++	@Override
++	public void setMetadataStore(MetadataStore metadataStore) {
++		this.metadataStore = metadataStore;
++	}
++
++	@Override
++	public void setAnnotationStore(AnnotationStore annotationStore) {
++		this.annotationStore = annotationStore;
++	}
++
++	@Override
++	public DataSetCheckResult checkDataSet(DataSetContainer dataSet) {
++		DataSetCheckResult result = new DataSetCheckResult();
++		result.setDataAccess(DataSetCheckResult.DataAccess.Possible);
++		return result;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java
+new file mode 100755
+index 0000000..d3f63e8
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java
+@@ -0,0 +1,50 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.HashMap;
++import java.util.Map;
++
++import com.fasterxml.jackson.annotation.JsonAnyGetter;
++import com.fasterxml.jackson.annotation.JsonAnySetter;
++
++import io.swagger.annotations.ApiModel;
++
++//JSON
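++// Apart from runtimeName, all endpoint attributes are held in a generic property map
++// (exposed through @JsonAnyGetter / @JsonAnySetter below), so arbitrary JSON fields
++// round-trip unchanged, e.g. (hypothetical payload):
++//   { "runtimeName": "Java", "className": "com.example.MyService" }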
++@ApiModel(description="Endpoint of the discovery service.")
++public class DiscoveryServiceEndpoint {
++	private String runtimeName;
++	private Map<String, Object> props = new HashMap<>();
++
++	public String getRuntimeName() {
++		return runtimeName;
++	}
++
++	public void setRuntimeName(String runtimeName) {
++		this.runtimeName = runtimeName;
++	}
++	
++	@JsonAnyGetter
++	public Map<String, Object> get() {
++		return props;
++	}
++
++	@JsonAnySetter
++	public void set(String name, Object value) {
++		props.put(name, value);
++	}
++	
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java
+new file mode 100755
+index 0000000..8e79511
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java
+@@ -0,0 +1,50 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++/**
++ * 
++ * This class represents a Java ODF discovery service endpoint.
++ * Note: It doesn't inherit from DiscoveryServiceEndpoint. To convert from / to that class, use JSONUtils.convert().
++ * 
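++ * <p>
++ * A minimal conversion sketch (the class name is hypothetical and the exact
++ * JSONUtils.convert() signature is assumed):
++ * <pre>{@code
++ * DiscoveryServiceJavaEndpoint javaEndpoint = new DiscoveryServiceJavaEndpoint();
++ * javaEndpoint.setClassName("com.example.MyDiscoveryService");
++ * DiscoveryServiceEndpoint endpoint =
++ *     JSONUtils.convert(javaEndpoint, DiscoveryServiceEndpoint.class);
++ * }</pre>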
++ */
++public class DiscoveryServiceJavaEndpoint {
++
++	private String runtimeName;
++	/*
++	 * The class name identifies a class that must be available on the classpath and implements the ODF service interface
++	 */
++	private String className;
++
++	public DiscoveryServiceJavaEndpoint() {
++		this.setRuntimeName("Java");
++	}
++	
++	public String getClassName() {
++		return className;
++	}
++
++	public void setClassName(String className) {
++		this.className = className;
++	}
++
++	public String getRuntimeName() {
++		return runtimeName;
++	}
++
++	public void setRuntimeName(String runtimeName) {
++		this.runtimeName = runtimeName;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java
+new file mode 100755
+index 0000000..00e28e2
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java
+@@ -0,0 +1,93 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.io.InputStream;
++import java.util.List;
++
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++/**
++ *
++ * External Java API for creating and managing discovery services
++ *
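++ * <p>
++ * A minimal registration sketch (id, name, and the {@code discoveryServiceManager}
++ * instance are hypothetical; how the manager is obtained depends on the ODF API entry point):
++ * <pre>{@code
++ * DiscoveryServiceProperties props = new DiscoveryServiceProperties();
++ * props.setId("my-discovery-service");
++ * props.setName("My discovery service");
++ * props.setEndpoint(endpoint); // a previously constructed DiscoveryServiceEndpoint
++ * discoveryServiceManager.createDiscoveryService(props);
++ * }</pre>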
++ */
++public interface DiscoveryServiceManager {
++
++	/**
++	 * Retrieve list of discovery services registered in ODF
++	 * @return List of registered ODF discovery services
++	 */
++	public List<DiscoveryServiceProperties> getDiscoveryServicesProperties();
++
++	/**
++	 * Register a new service in ODF
++	 * @param dsProperties Properties of the discovery service to register
++	 * @throws ValidationException Validation of a property failed
++	 */
++	public void createDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException;
++
++	/**
++	 * Update configuration of an ODF discovery service
++	 * @param dsProperties Properties of the discovery service to update
++	 * @throws ServiceNotFoundException A service with this ID is not registered
++	 * @throws ValidationException Validation of a property failed
++	 */
++	public void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ServiceNotFoundException, ValidationException;
++
++	/**
++	 * Remove a registered service from ODF
++	 * @param serviceId Discovery service ID
++	 * @throws ServiceNotFoundException A service with this ID is not registered
++	 * @throws ValidationException Validation failed while removing the service
++	 */
++	public void deleteDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException;
++
++	/**
++	 * Retrieve current configuration of a discovery service registered in ODF
++	 * @param serviceId Discovery Service ID
++	 * @return Properties of the service with this ID
++	 * @throws ServiceNotFoundException A service with this ID is not registered
++	 */
++	public DiscoveryServiceProperties getDiscoveryServiceProperties(String serviceId) throws ServiceNotFoundException;
++
++	/**
++	 * Retrieve status overview of all discovery services registered in ODF
++	 * @return List of status count maps for all discovery services
++	 */
++	public List<ServiceStatusCount> getDiscoveryServiceStatusOverview();
++
++	/**
++	 * Retrieve status of a specific discovery service. Returns null if no service info can be obtained
++	 * @param serviceId Discovery Service ID
++	 * @return Status of the service with this ID
++	 */
++	public DiscoveryServiceStatus getDiscoveryServiceStatus(String serviceId) throws ServiceNotFoundException;
++
++	/**
++	 * Retrieve runtime statistics of a specific discovery service
++	 * @param serviceId Discovery Service ID
++	 * @return Runtime statistics of the service with this ID
++	 */
++	public DiscoveryServiceRuntimeStatistics getDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException;
++
++	/**
++	 * Delete runtime statistics of a specific discovery service
++	 * @param serviceId Discovery Service ID
++	 */
++	public void deleteDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException;
++
++	/**
++	 * Retrieve picture representing a discovery service
++	 * @param serviceId Discovery Service ID
++	 * @return Input stream for image
++	 */
++	public InputStream getDiscoveryServiceImage(String serviceId) throws ServiceNotFoundException;
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java
+new file mode 100755
+index 0000000..78989ec
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java
+@@ -0,0 +1,173 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * 
++ * This class is used for registering a service with an ODF instance.
++ * A JSON document of this type must be provided by a remote discovery service implementation in order to register it with an ODF instance.
++ *
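++ * <p>
++ * A minimal sketch of such a registration document (all values are hypothetical):
++ * <pre>{@code
++ * {
++ *   "id": "my-discovery-service",
++ *   "name": "My discovery service",
++ *   "description": "Example service description",
++ *   "endpoint": { "runtimeName": "Java", "className": "com.example.MyService" }
++ * }
++ * }</pre>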
++ */
++@ApiModel(description="Parameters describing a discovery service.")
++public class DiscoveryServiceProperties {
++	@ApiModelProperty(value="Unique id string of the discovery service", required=true)
++	private String id;
++
++	@ApiModelProperty(value="Descriptive name of the discovery service", required=true)
++	private String name;
++
++	@ApiModelProperty(value="Optional description of the discovery service")
++	private String description;
++
++	@ApiModelProperty(value="Optional custom description of the discovery service")
++	private String customDescription;
++
++	@ApiModelProperty(value="Optional link to a JPG or PNG image illustrating the discovery service")
++	private String iconUrl;
++
++	@ApiModelProperty(value="Optional URL pointing to the description of the discovery service")
++	private String link;
++	
++	public String getCustomDescription() {
++		return customDescription;
++	}
++
++	public void setCustomDescription(String customDescription) {
++		this.customDescription = customDescription;
++	}
++
++	public List<String> getPrerequisiteAnnotationTypes() {
++		return prerequisiteAnnotationTypes;
++	}
++
++	public void setPrerequisiteAnnotationTypes(List<String> prerequisiteAnnotationTypes) {
++		this.prerequisiteAnnotationTypes = prerequisiteAnnotationTypes;
++	}
++
++	public List<String> getResultingAnnotationTypes() {
++		return resultingAnnotationTypes;
++	}
++
++	public void setResultingAnnotationTypes(List<String> resultingAnnotationTypes) {
++		this.resultingAnnotationTypes = resultingAnnotationTypes;
++	}
++
++	public List<String> getSupportedObjectTypes() {
++		return supportedObjectTypes;
++	}
++
++	public void setSupportedObjectTypes(List<String> supportedObjectTypes) {
++		this.supportedObjectTypes = supportedObjectTypes;
++	}
++
++	public List<String> getAssignedObjectTypes() {
++		return assignedObjectTypes;
++	}
++
++	public void setAssignedObjectTypes(List<String> assignedObjectTypes) {
++		this.assignedObjectTypes = assignedObjectTypes;
++	}
++
++	public List<String> getAssignedObjectCandidates() {
++		return assignedObjectCandidates;
++	}
++
++	public void setAssignedObjectCandidates(List<String> assignedObjectCandidates) {
++		this.assignedObjectCandidates = assignedObjectCandidates;
++	}
++
++	@ApiModelProperty(value="List of prerequisite annotation types required to run the discovery service")
++	private List<String> prerequisiteAnnotationTypes;
++
++	@ApiModelProperty(value="List of annotation types created by the discovery service")
++	private List<String> resultingAnnotationTypes;
++
++	@ApiModelProperty(value="Types of objects that can be analyzed by the discovery service")
++	private List<String> supportedObjectTypes;
++
++	@ApiModelProperty(value="Types of objects that may be assigned to the resulting annotations")
++	private List<String> assignedObjectTypes;
++
++	@ApiModelProperty(value="Ids of specific objects (e.g. data classes) that may be assigned to resulting annotations")
++	private List<String> assignedObjectCandidates;
++
++	@ApiModelProperty(value = "Number of parallel analyses the service can handle, with a default of 2")
++	private Integer parallelismCount = 2;
++
++	@ApiModelProperty(value="Endpoint of the discovery service", required=true)
++	private DiscoveryServiceEndpoint endpoint;
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public String getName() {
++		return name;
++	}
++
++	public void setName(String name) {
++		this.name = name;
++	}
++
++	public String getDescription() {
++		return description;
++	}
++
++	public void setDescription(String description) {
++		this.description = description;
++	}
++
++	public String getIconUrl() {
++		return iconUrl;
++	}
++
++	public void setIconUrl(String iconURL) {
++		this.iconUrl = iconURL;
++	}
++
++	public String getLink() {
++		return link;
++	}
++
++	public void setLink(String link) {
++		this.link = link;
++	}
++
++	public DiscoveryServiceEndpoint getEndpoint() {
++		return endpoint;
++	}
++
++	public void setEndpoint(DiscoveryServiceEndpoint endpoint) {
++		this.endpoint = endpoint;
++	}
++
++	public Integer getParallelismCount() {
++		return parallelismCount;
++	}
++
++	public void setParallelismCount(Integer parallelismCount) {
++		this.parallelismCount = parallelismCount;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java
+new file mode 100755
+index 0000000..a922a08
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * JSON object representing properties of registered discovery services.
++ *  
++ */
++
++@ApiModel(description="List of properties of registered discovery services")
++public class DiscoveryServicePropertiesList {
++	
++	@ApiModelProperty(value="List of properties of registered discovery services", readOnly=true)
++	DiscoveryServiceProperties[] items;
++
++	@ApiModelProperty(value="Number of items in the list", readOnly=true)
++	int count;
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java
+new file mode 100755
+index 0000000..392ca82
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java
+@@ -0,0 +1,179 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.Map;
++
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * 
++ * This class represents an analysis request that is passed from ODF to the service.
++ *
++ */
++@ApiModel(description="Request for running a single discovery service.")
++public class DiscoveryServiceRequest {
++	/**
++	 * The discoveryService identifier
++	 */
++	@ApiModelProperty(value="Id string of the discovery service to be invoked", required=true)
++	private String discoveryServiceId;
++	/**
++	 * This property can be used by a user to pass additional information from the analysis request to the service execution
++	 */
++	@ApiModelProperty(value="Optional additional properties to be passed to the discovery service", required=false)
++	private Map<String, Object> additionalProperties;
++
++	@ApiModelProperty(value="User id under which the discovery service is supposed to run", required=true)
++	private String user;
++	/**
++	 * This property contains information about the data that is supposed to be analysed
++	 */
++	@ApiModelProperty(value="Data set to be analyzed along with cached metadata objects", required=true)
++	private DataSetContainer dataSetContainer;
++
++	@ApiModelProperty(value="Unique id of the analysis request to which the discovery service request belongs", required=true)
++	private String odfRequestId;
++
++	@ApiModelProperty(value="URL of ODF admin API for remote access to metadata", required=false)
++	private String odfUrl;
++
++	@ApiModelProperty(value="ODF user id for remote access to metadata", required=false)
++	private String odfUser;
++
++	@ApiModelProperty(value="ODF password for remote access to metadata", required=false)
++	private String odfPassword;
++	/**
++	 * timestamp of the time the request was put on the ODF request queue
++	 */
++	@ApiModelProperty(value="Timestamp when the request was put on ODF request queue", required=true)
++	private long putOnRequestQueue;
++	/**
++	 * timestamp of the time the request was taken from the queue and execution was started
++	 */
++	@ApiModelProperty(value="Timestamp when the execution was started", required=true)
++	private long takenFromRequestQueue;
++	/**
++	 * timestamp of the time the request was processed successfully
++	 */
++	@ApiModelProperty(value="Timestamp when processing was finished", required=true)
++	private long finishedProcessing;
++	/**
++	 * duration needed for storing the analysis results
++	 */
++	@ApiModelProperty(value="Time needed for storing results in metadata repository", required=true)
++	private long timeSpentStoringResults;
++
++	public String getDiscoveryServiceId() {
++		return discoveryServiceId;
++	}
++
++	public void setDiscoveryServiceId(String discoveryServiceId) {
++		this.discoveryServiceId = discoveryServiceId;
++	}
++
++	public Map<String, Object> getAdditionalProperties() {
++		return additionalProperties;
++	}
++
++	public void setAdditionalProperties(Map<String, Object> additionalProperties) {
++		this.additionalProperties = additionalProperties;
++	}
++
++	public String getUser() {
++		return user;
++	}
++
++	public void setUser(String user) {
++		this.user = user;
++	}
++
++	public DataSetContainer getDataSetContainer() {
++		return dataSetContainer;
++	}
++
++	public void setDataSetContainer(DataSetContainer dataSet) {
++		this.dataSetContainer = dataSet;
++	}
++
++	public String getOdfRequestId() {
++		return odfRequestId;
++	}
++
++	public void setOdfRequestId(String odfRequestId) {
++		this.odfRequestId = odfRequestId;
++	}
++
++	public String getOdfUrl() {
++		return odfUrl;
++	}
++
++	public void setOdfUrl(String odfUrl) {
++		this.odfUrl = odfUrl;
++	}
++
++	public String getOdfUser() {
++		return this.odfUser;
++	}
++
++	public void setOdfUser(String odfUser) {
++		this.odfUser = odfUser;
++	}
++
++	public String getOdfPassword() {
++		return this.odfPassword;
++	}
++
++	public void setOdfPassword(String odfPassword) {
++		this.odfPassword = odfPassword;
++	}
++
++	public long getFinishedProcessing() {
++		return finishedProcessing;
++	}
++
++	public void setFinishedProcessing(long finishedProcessing) {
++		this.finishedProcessing = finishedProcessing;
++	}
++
++	public long getTakenFromRequestQueue() {
++		return takenFromRequestQueue;
++	}
++
++	public void setTakenFromRequestQueue(long takenFromRequestQueue) {
++		this.takenFromRequestQueue = takenFromRequestQueue;
++	}
++
++	public long getPutOnRequestQueue() {
++		return putOnRequestQueue;
++	}
++
++	public void setPutOnRequestQueue(long putOnRequestQueue) {
++		this.putOnRequestQueue = putOnRequestQueue;
++	}
++
++	public long getTimeSpentStoringResults() {
++		return timeSpentStoringResults;
++	}
++
++	public void setTimeSpentStoringResults(long timeSpentStoringResults) {
++		this.timeSpentStoringResults = timeSpentStoringResults;
++	}
++
++}
++
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java
+new file mode 100755
+index 0000000..8208744
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java
+@@ -0,0 +1,62 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import com.fasterxml.jackson.annotation.JsonSubTypes;
++import com.fasterxml.jackson.annotation.JsonSubTypes.Type;
++import com.fasterxml.jackson.annotation.JsonTypeInfo;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
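++// Serialized responses carry a "type" discriminator that selects the concrete subclass,
++// e.g. (hypothetical payload): { "type": "sync", "code": "OK", "details": "done" }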
++@JsonTypeInfo(  
++	    use = JsonTypeInfo.Id.NAME,  
++	    include = JsonTypeInfo.As.PROPERTY,  
++	    property = "type")
++
++@JsonSubTypes({  
++    @Type(value = DiscoveryServiceAsyncStartResponse.class, name = "async"),  
++    @Type(value = DiscoveryServiceSyncResponse.class, name = "sync") })  
++@ApiModel(description="Response returned by the discovery service.", subTypes={DiscoveryServiceAsyncStartResponse.class,DiscoveryServiceSyncResponse.class}, discriminator="type")
++public abstract class DiscoveryServiceResponse {
++	public static enum ResponseCode {
++		OK, NOT_AUTHORIZED, TEMPORARILY_UNAVAILABLE, UNKNOWN_ERROR
++	};
++
++	@ApiModelProperty(value="Response code indicating whether the discovery service request was issued successfully", readOnly=true, required=true)
++	private ResponseCode code;
++
++	@ApiModelProperty(value="Detailed status of the analysis request", readOnly=true, required=false)
++	private String details;
++
++	public ResponseCode getCode() {
++		return code;
++	}
++
++	public void setCode(ResponseCode code) {
++		this.code = code;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java
+new file mode 100755
+index 0000000..5c7fff9
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java
+@@ -0,0 +1,46 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * This class must be returned by a service so that ODF can store the results.
++ */
++@ApiModel(description="Results of a discovery service run.")
++public class DiscoveryServiceResult {
++
++	/**
++	 * The actual results of the service execution
++	 */
++	@ApiModelProperty(value="List of annotations generated by the discovery service run (following the format of the annotationPrototypes)", readOnly=true)
++	private List<Annotation> annotations;
++
++	public List<Annotation> getAnnotations() {
++		return annotations;
++	}
++
++	public void setAnnotations(List<Annotation> annotations) {
++		this.annotations = annotations;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java
+new file mode 100755
+index 0000000..15127e3
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * JSON object representing the runtime statistics of a discovery service.
++ */
++@ApiModel(description="Runtime statistics of a discovery service")
++public class DiscoveryServiceRuntimeStatistics {
++	
++	// TODO: placeholder for things to add
++	@ApiModelProperty(value="Average processing time per item (in milliseconds)", readOnly=true)
++	long averageProcessingTimePerItemInMillis;
++
++	public long getAverageProcessingTimePerItemInMillis() {
++		return averageProcessingTimePerItemInMillis;
++	}
++
++	public void setAverageProcessingTimePerItemInMillis(long averageProcessingTimePerItemInMillis) {
++		this.averageProcessingTimePerItemInMillis = averageProcessingTimePerItemInMillis;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java
+new file mode 100755
+index 0000000..d377947
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java
+@@ -0,0 +1,79 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++// JSON
++/**
++ * This class describes the endpoint of a remote Spark discovery service that can be used by ODF.
++ * Note: It does not inherit from DiscoveryServiceEndpoint. Use JSONUtils.convert() to convert from / to that class.
++ */
++public class DiscoveryServiceSparkEndpoint {
++	/**
++	 * This property informs ODF about the type of input for the underlying Spark job: a (CSV) file vs. a (database) connection.
++	 */
++	public static enum SERVICE_INTERFACE_TYPE {
++		DataFrame, Generic
++	}
++
++	public static final String ANNOTATION_PROPERTY_COLUMN_NAME = "ODF_ANNOTATED_COLUMN";
++	public static final String ANNOTATION_SUMMARY_COLUMN_NAME = "ODF_ANNOTATION_SUMMARY";
++	public static final String ODF_BEGIN_OF_ANNOTATION_RESULTS = "***ODF_BEGIN_OF_ANNOTATION_RESULTS***\n";
++
++	private String runtimeName;
++
++	private SERVICE_INTERFACE_TYPE inputMethod = null;
++
++	private String jar;
++
++	private String className;
++
++	public DiscoveryServiceSparkEndpoint() {
++		this.setRuntimeName("Spark");
++	}
++	
++	public String getJar() {
++		return jar;
++	}
++
++	public void setJar(String jar) {
++		this.jar = jar;
++	}
++
++	public String getClassName() {
++		return className;
++	}
++
++	public void setClassName(String className) {
++		this.className = className;
++	}
++
++	public SERVICE_INTERFACE_TYPE getInputMethod() {
++		return inputMethod;
++	}
++
++	public void setInputMethod(SERVICE_INTERFACE_TYPE inputMethod) {
++		this.inputMethod = inputMethod;
++	}
++
++	public String getRuntimeName() {
++		return runtimeName;
++	}
++
++	public void setRuntimeName(String runtimeName) {
++		this.runtimeName = runtimeName;
++	}
++
++}
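++
++/* Illustrative sketch (not part of the ODF source): a Spark service is
++ * registered by filling in this endpoint; the jar location and class name
++ * below are hypothetical:
++ *
++ *   DiscoveryServiceSparkEndpoint ep = new DiscoveryServiceSparkEndpoint();
++ *   ep.setJar("file:///tmp/my-discovery-service.jar");
++ *   ep.setClassName("com.example.MySparkDiscoveryService");
++ *   ep.setInputMethod(DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE.DataFrame);
++ *
++ * As noted above, converting from / to the generic DiscoveryServiceEndpoint is
++ * done via JSONUtils.convert() (not shown in this file).
++ */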
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java
+new file mode 100755
+index 0000000..f263a9e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java
+@@ -0,0 +1,62 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * JSON object representing the status of a discovery service.
++ */
++@ApiModel(description="Status of a discovery service")
++public class DiscoveryServiceStatus {
++	public static enum Status {
++		OK, ERROR
++	};
++
++	@ApiModelProperty(value="Status of the ODF service", allowableValues="OK,ERROR", readOnly=true, required=true)
++	Status status;
++
++	@ApiModelProperty(value="Status message", readOnly=true, required=true)
++	String message;
++	
++	@ApiModelProperty(value="Status count of the discovery service", readOnly=true, required=true)
++	ServiceStatusCount statusCount;
++
++	public Status getStatus() {
++		return status;
++	}
++
++	public void setStatus(Status status) {
++		this.status = status;
++	}
++
++	public String getMessage() {
++		return message;
++	}
++
++	public void setMessage(String message) {
++		this.message = message;
++	}
++
++	public ServiceStatusCount getStatusCount() {
++		return statusCount;
++	}
++
++	public void setStatusCount(ServiceStatusCount statusCount) {
++		this.statusCount = statusCount;
++	}
++	
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java
+new file mode 100755
+index 0000000..c320bf6
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java
+@@ -0,0 +1,38 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.text.MessageFormat;
++
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++public class ServiceNotFoundException extends ValidationException {
++
++	private static final long serialVersionUID = 1L;
++	private String serviceId;
++	
++	public ServiceNotFoundException(String serviceId) {
++		super("Service not found");
++		this.serviceId = serviceId;
++	}
++
++	@Override
++	public String getMessage() {
++		return MessageFormat.format("Discovery service with id {0} is not registered", serviceId);
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java
+new file mode 100755
+index 0000000..85ba444
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java
+@@ -0,0 +1,58 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.HashMap;
++import java.util.Map;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Request status counts of a discovery service.")
++public class ServiceStatusCount {
++	@ApiModelProperty(value="Id string of the discovery service", readOnly=true, required=true)
++	private String id;
++
++	@ApiModelProperty(value="Descriptive name of the discovery service", readOnly=true, required=true)
++	private String name;
++
++	@ApiModelProperty(value="Number of analysis requests per status for this discovery service", readOnly=true)
++	private Map<STATUS, Integer> statusCountMap = new HashMap<STATUS, Integer>();
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public Map<STATUS, Integer> getStatusCountMap() {
++		return statusCountMap;
++	}
++
++	public void setStatusCountMap(Map<STATUS, Integer> statusCountMap) {
++		this.statusCountMap = statusCountMap;
++	}
++
++	public String getName() {
++		return name;
++	}
++
++	public void setName(String name) {
++		this.name = name;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java
+new file mode 100755
+index 0000000..ef6666e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java
+@@ -0,0 +1,46 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++
++/**
++ * Abstract base class to extend when creating a synchronous discovery service.
++ */
++public abstract class SyncDiscoveryServiceBase extends DiscoveryServiceBase implements SyncDiscoveryService {
++	
++	protected DiscoveryServiceSyncResponse createSyncResponse(ResponseCode code, String detailsMessage, List<? extends Annotation> annotations) {
++		try {
++			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++			response.setCode(code);
++			response.setDetails(detailsMessage);
++			DiscoveryServiceResult result = new DiscoveryServiceResult();
++			if (annotations != null) {
++				result.setAnnotations((List<Annotation>) annotations);
++			}
++			response.setResult(result);
++			return response;
++		} catch (Exception exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++	
++}
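++
++/* Illustrative sketch (not part of the ODF source): a minimal synchronous
++ * service built on this base class; the analysis logic is hypothetical:
++ *
++ *   public class MySyncService extends SyncDiscoveryServiceBase {
++ *       @Override
++ *       public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++ *           List<Annotation> annotations = new java.util.ArrayList<>(); // compute annotations here
++ *           return createSyncResponse(ResponseCode.OK, "Analysis completed", annotations);
++ *       }
++ *   }
++ */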
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java
+new file mode 100755
+index 0000000..8a98a6b
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java
+@@ -0,0 +1,29 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.async;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++
++
++/**
++ * An asynchronous discovery service must implement this interface.
++ */
++public interface AsyncDiscoveryService extends DiscoveryService {
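++	/**
++	 * Starts the analysis asynchronously and returns immediately.
++	 *
++	 * @param request Request parameter that includes a reference to the data set to be analyzed
++	 * @return Response object carrying the run id to be used for subsequent status requests
++	 */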
++	DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request);
++
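++	/**
++	 * Returns the current status of an analysis run.
++	 *
++	 * @param runId Id of the run returned by startAnalysis()
++	 * @return Status of the run, including the result once it is available
++	 */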
++	DiscoveryServiceAsyncRunStatus getStatus(String runId);
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java
+new file mode 100755
+index 0000000..49b50ca
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java
+@@ -0,0 +1,77 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.async;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * An object of this class must be returned when ODF requests the status of an analysis run.
++ */
++@ApiModel(description="Status of an asynchronous discovery service run.")
++public class DiscoveryServiceAsyncRunStatus {
++	public static enum State {
++		RUNNING, ERROR, NOT_FOUND, FINISHED
++	}
++
++	@ApiModelProperty(value="Id of the discovery service run", readOnly=true, required=true)
++	private String runId;
++
++	@ApiModelProperty(value="Status of the discovery service run", readOnly=true, required=true)
++	private State state;
++
++	@ApiModelProperty(value="Optional status message", readOnly=true)
++	private String details;
++
++	@ApiModelProperty(value="Result of the discovery service run (if already available)", readOnly=true)
++	private DiscoveryServiceResult result;
++
++	public String getRunId() {
++		return runId;
++	}
++
++	public void setRunId(String runId) {
++		this.runId = runId;
++	}
++
++	public State getState() {
++		return state;
++	}
++
++	public void setState(State state) {
++		this.state = state;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++	public DiscoveryServiceResult getResult() {
++		return result;
++	}
++
++	public void setResult(DiscoveryServiceResult result) {
++		this.result = result;
++	}
++
++}
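++
++/* Illustrative sketch (not part of the ODF source): a caller typically polls
++ * an AsyncDiscoveryService until the run leaves the RUNNING state; the polling
++ * interval below is an arbitrary example value:
++ *
++ *   DiscoveryServiceAsyncStartResponse start = service.startAnalysis(request);
++ *   DiscoveryServiceAsyncRunStatus status;
++ *   do {
++ *       Thread.sleep(1000);
++ *       status = service.getStatus(start.getRunId());
++ *   } while (status.getState() == DiscoveryServiceAsyncRunStatus.State.RUNNING);
++ *   // on FINISHED, status.getResult() holds the generated annotations
++ */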
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java
+new file mode 100755
+index 0000000..5d6027d
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java
+@@ -0,0 +1,41 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.async;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * An object of this class must be returned by an asynchronous service after starting an analysis.
++ */
++@ApiModel(description="Response returned by an asynchronous discovery service.")
++public class DiscoveryServiceAsyncStartResponse extends DiscoveryServiceResponse {
++	/**
++	 * Property identifying the running analysis. This id will be used to repeatedly request the status of the analysis.
++	 */
++	@ApiModelProperty(value="Id of the analysis request (asynchronous requests only)", readOnly=true, required=true)
++	private String runId;
++
++	public String getRunId() {
++		return runId;
++	}
++
++	public void setRunId(String runId) {
++		this.runId = runId;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java
+new file mode 100755
+index 0000000..ed57357
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java
+@@ -0,0 +1,52 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.datasets;
++
++import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * This class holds a reference to a metadata object in a metadata store, along with an optional metadata cache.
++ */
++@ApiModel(description="Container keeping reference to data set along with cached metadata objects.")
++public class DataSetContainer {
++
++	@ApiModelProperty(value="Reference to the data set to be analyzed", required=true)
++	private MetaDataObject oMDataSet;
++
++	@ApiModelProperty(value="A metadata cache that may be used by discovery services if access to the metadata store is not available", required=false)
++	private MetaDataCache metaDataCache;
++
++	public MetaDataObject getDataSet() {
++		return oMDataSet;
++	}
++
++	public void setDataSet(MetaDataObject oMDataSet) {
++		this.oMDataSet = oMDataSet;
++	}
++
++	public MetaDataCache getMetaDataCache() {
++		return metaDataCache;
++	}
++
++	public void setMetaDataCache(MetaDataCache metaDataCache) {
++		this.metaDataCache = metaDataCache;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java
+new file mode 100755
+index 0000000..a00c4eb
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java
+@@ -0,0 +1,57 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.datasets;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++
++// JSON
++/**
++ * This class represents the materialized contents of a data set.
++ */
++public class MaterializedDataSet {
++	private RelationalDataSet table;
++	private List<Column> oMColumns;
++
++	// Row data: each row lists its values in the same order as oMColumns.
++	private List<List<Object>> data;
++
++	public List<Column> getColumns() {
++		return oMColumns;
++	}
++
++	public void setColumns(List<Column> oMColumns) {
++		this.oMColumns = oMColumns;
++	}
++
++	public RelationalDataSet getTable() {
++		return table;
++	}
++
++	public void setTable(RelationalDataSet table) {
++		this.table = table;
++	}
++
++	public List<List<Object>> getData() {
++		return data;
++	}
++
++	public void setData(List<List<Object>> data) {
++		this.data = data;
++	}
++
++}
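++
++/* Illustrative sketch (not part of the ODF source): each entry of data is one
++ * row whose values are ordered like the column list; the column objects are
++ * hypothetical:
++ *
++ *   MaterializedDataSet mds = new MaterializedDataSet();
++ *   mds.setTable(table); // some RelationalDataSet
++ *   mds.setColumns(java.util.Arrays.asList(nameColumn, ageColumn));
++ *   mds.setData(java.util.Arrays.asList(
++ *       java.util.Arrays.<Object>asList("Alice", 30),
++ *       java.util.Arrays.<Object>asList("Bob", 42)));
++ */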
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java
+new file mode 100755
+index 0000000..b5b69f4
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java
+@@ -0,0 +1,40 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.sync;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * An object of this class must be returned by a synchronous discovery service.
++ */
++@ApiModel(description="Response returned by a synchronous discovery service.")
++public class DiscoveryServiceSyncResponse extends DiscoveryServiceResponse {
++	@ApiModelProperty(value="Result of the analysis (synchronous requests only)", readOnly=true, required=true)
++	private DiscoveryServiceResult result;
++
++	public DiscoveryServiceResult getResult() {
++		return result;
++	}
++
++	public void setResult(DiscoveryServiceResult result) {
++		this.result = result;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java
+new file mode 100755
+index 0000000..626d78c
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.discoveryservice.sync;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++
++/**
++ * Synchronous discovery services must implement this interface.
++ */
++public interface SyncDiscoveryService extends DiscoveryService {
++
++    /**
++     * Runs the actual discovery service.
++     * 
++     * @param request Request parameter that includes a reference to the data set to be analyzed
++     * @return Response object that includes the annotations to be created along with status information
++     */
++	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request);
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java
+new file mode 100755
+index 0000000..0805b30
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Kafka broker node details")
++public class BrokerNode {
++	@ApiModelProperty(value="Host of the Kafka broker", readOnly=true, required=true)
++	private String host;
++
++	@ApiModelProperty(value="Indicates whether the broker is the leader of the partition", readOnly=true, required=true)
++	private boolean isLeader;
++
++	public boolean isLeader() {
++		return isLeader;
++	}
++
++	public void setLeader(boolean isLeader) {
++		this.isLeader = isLeader;
++	}
++
++	public String getHost() {
++		return host;
++	}
++
++	public void setHost(String host) {
++		this.host = host;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java
+new file mode 100755
+index 0000000..4c441a9
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java
+@@ -0,0 +1,76 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.List;
++
++/**
++ * External Java API for managing and controlling the ODF engine.
++ */
++public interface EngineManager {
++
++	/**
++	 * Checks the health status of ODF
++	 *
++	 * @return Health status of the ODF engine
++	 */
++	public SystemHealth checkHealthStatus();
++
++	
++	/**
++	 * Get information about all available service runtimes.
++	 * 
++	 * @return Runtimes info
++	 */
++	ServiceRuntimesInfo getRuntimesInfo();
++	
++	/**
++	 * Returns the status of the ODF thread manager
++	 *
++	 * @return Status of all threads making up the ODF thread manager
++	 */
++	public List<ThreadStatus> getThreadManagerStatus();
++
++	/**
++	 * Returns the status of the ODF messaging subsystem
++	 *
++	 * @return Status of the ODF messaging subsystem
++	 */
++	public MessagingStatus getMessagingStatus();
++
++	/**
++	 * Returns the status of the messaging subsystem and the internal thread manager
++	 *
++	 * @return Combined status of the messaging subsystem and the internal thread manager
++	 */
++	public ODFStatus getStatus();
++
++	/**
++	 * Returns the current ODF version
++	 *
++	 * @return ODF version identifier
++	 */
++	public ODFVersion getVersion();
++
++	/**
++	 * Shuts down the ODF engine, purges all scheduled analysis requests from the queues, and cancels all running analysis requests.
++	 * This means that all running jobs will be cancelled or their results will not be reported back. For debugging purposes only.
++	 *
++	 * @param options Option for immediately restarting the engine after shutdown (default is not to restart immediately but only when needed)
++	 */
++	public void shutdown(ODFEngineOptions options);
++}
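++
++/* Illustrative sketch (not part of the ODF source): given an EngineManager
++ * instance (e.g. obtained through the OpenDiscoveryFramework facade), a basic
++ * health check could look like this:
++ *
++ *   SystemHealth health = engineManager.checkHealthStatus();
++ *   if (health.getStatus() != SystemHealth.HealthStatus.OK) {
++ *       for (String msg : health.getMessages()) {
++ *           System.err.println(msg);
++ *       }
++ *   }
++ */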
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java
+new file mode 100755
+index 0000000..fdd84af
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.HashMap;
++import java.util.Map;
++
++public class KafkaBrokerPartitionMessageCountInfo {
++
++	private String broker;
++	private Map<Integer, Long> partitionMsgCountMap = new HashMap<Integer, Long>();
++
++	public String getBroker() {
++		return broker;
++	}
++
++	public void setBroker(String broker) {
++		this.broker = broker;
++	}
++
++	public Map<Integer, Long> getPartitionMsgCountMap() {
++		return partitionMsgCountMap;
++	}
++
++	public void setPartitionMsgCountMap(Map<Integer, Long> partitionMsgCountMap) {
++		this.partitionMsgCountMap = partitionMsgCountMap;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java
+new file mode 100755
+index 0000000..5f6e4f8
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java
+@@ -0,0 +1,45 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Information on Kafka offsets per group id")
++public class KafkaGroupOffsetInfo {
++	@ApiModelProperty(value="Kafka group id", readOnly=true, required=true)
++	private String groupId;
++
++	@ApiModelProperty(value="List of Kafka offsets", readOnly=true, required=true)
++	private List<PartitionOffsetInfo> offsets = new ArrayList<PartitionOffsetInfo>();
++
++	public String getGroupId() {
++		return groupId;
++	}
++
++	public void setGroupId(String groupId) {
++		this.groupId = groupId;
++	}
++
++	public List<PartitionOffsetInfo> getOffsets() {
++		return offsets;
++	}
++
++	public void setOffsets(List<PartitionOffsetInfo> offsets) {
++		this.offsets = offsets;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java
+new file mode 100755
+index 0000000..8ab8f15
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java
+@@ -0,0 +1,45 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Kafka nodes belonging to a specific partition")
++public class KafkaPartitionInfo {
++	@ApiModelProperty(value="Partition id", readOnly=true, required=true)
++	private Integer partitionId;
++
++	@ApiModelProperty(value="List of nodes containing this partition", readOnly=true, required=true)
++	private List<BrokerNode> nodes;
++
++	public List<BrokerNode> getNodes() {
++		return nodes;
++	}
++
++	public void setNodes(List<BrokerNode> nodes) {
++		this.nodes = nodes;
++	}
++
++	public Integer getPartitionId() {
++		return partitionId;
++	}
++
++	public void setPartitionId(Integer partitionId) {
++		this.partitionId = partitionId;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java
+new file mode 100755
+index 0000000..10ff1a5
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java
+@@ -0,0 +1,46 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Status of the Kafka ODF queues")
++public class KafkaStatus extends MessagingStatus {
++	@ApiModelProperty(value="List of message brokers", readOnly=true)
++	private List<String> brokers = new ArrayList<String>();
++
++	@ApiModelProperty(value="Status of the individual topics", readOnly=true)
++	private List<KafkaTopicStatus> topicStatus = new ArrayList<KafkaTopicStatus>();
++
++	public List<String> getBrokers() {
++		return brokers;
++	}
++
++	public void setBrokers(List<String> brokers) {
++		this.brokers = brokers;
++	}
++
++	public List<KafkaTopicStatus> getTopicStatus() {
++		return topicStatus;
++	}
++
++	public void setTopicStatus(List<KafkaTopicStatus> topicStatus) {
++		this.topicStatus = topicStatus;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java
+new file mode 100755
+index 0000000..7e41939
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java
+@@ -0,0 +1,69 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Status of an individual Kafka topic")
++public class KafkaTopicStatus {
++	
++	@ApiModelProperty(value="Kafka topic", readOnly=true, required=true)
++	private String topic;
++
++	@ApiModelProperty(value="Information on Kafka offsets per group id (can be used by the admin to track how many messages are still waiting to be consumed)", readOnly=true, required=true)
++	private List<KafkaGroupOffsetInfo> consumerGroupOffsetInfo = new ArrayList<KafkaGroupOffsetInfo>();
++
++	@ApiModelProperty(value="List of Kafka partitions and the nodes they belong to", readOnly=true, required=true)
++	private List<KafkaPartitionInfo> partitionBrokersInfo = new ArrayList<KafkaPartitionInfo>();
++
++	@ApiModelProperty(value="Message counts of individual brokers", readOnly=true, required=true)
++	private List<KafkaBrokerPartitionMessageCountInfo> brokerPartitionMessageCountInfo = new ArrayList<KafkaBrokerPartitionMessageCountInfo>();
++
++	public String getTopic() {
++		return topic;
++	}
++
++	public void setTopic(String topic) {
++		this.topic = topic;
++	}
++
++	public List<KafkaGroupOffsetInfo> getConsumerGroupOffsetInfo() {
++		return consumerGroupOffsetInfo;
++	}
++
++	public void setConsumerGroupOffsetInfo(List<KafkaGroupOffsetInfo> offsetInfoList) {
++		this.consumerGroupOffsetInfo = offsetInfoList;
++	}
++
++	public List<KafkaPartitionInfo> getPartitionBrokersInfo() {
++		return partitionBrokersInfo;
++	}
++
++	public void setPartitionBrokersInfo(List<KafkaPartitionInfo> partitionBrokersMap) {
++		this.partitionBrokersInfo = partitionBrokersMap;
++	}
++
++	public List<KafkaBrokerPartitionMessageCountInfo> getBrokerPartitionMessageInfo() {
++		return brokerPartitionMessageCountInfo;
++	}
++
++	public void setBrokerPartitionMessageInfo(List<KafkaBrokerPartitionMessageCountInfo> brokerInfo) {
++		this.brokerPartitionMessageCountInfo = brokerInfo;
++	}
++
++}
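++
++/* Illustrative sketch (not part of the ODF source): a rough per-group lag
++ * estimate over one KafkaTopicStatus, assuming each broker's
++ * partitionMsgCountMap reports the latest offset (message count) per partition:
++ *
++ *   for (KafkaGroupOffsetInfo group : topicStatus.getConsumerGroupOffsetInfo()) {
++ *       for (PartitionOffsetInfo po : group.getOffsets()) {
++ *           for (KafkaBrokerPartitionMessageCountInfo broker : topicStatus.getBrokerPartitionMessageInfo()) {
++ *               Long latest = broker.getPartitionMsgCountMap().get(po.getPartitionId());
++ *               if (latest != null && po.getOffset() != null) {
++ *                   long lag = latest - po.getOffset();
++ *                   // report lag for group.getGroupId(), partition po.getPartitionId()
++ *               }
++ *           }
++ *       }
++ *   }
++ */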
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java
+new file mode 100755
+index 0000000..f3248ac
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java
+@@ -0,0 +1,21 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++
++@ApiModel(description="Status of the ODF queues", subTypes={KafkaStatus.class})
++public class MessagingStatus {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java
+new file mode 100755
+index 0000000..fb3d3d6
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="ODF startup options.")
++public class ODFEngineOptions {
++	
++	@ApiModelProperty(value="Indicates whether to explicitly restart the queues after shutting down the ODF engine (or to implicitly restart them when needed)", required=true)
++	private boolean restart = false;
++	
++	public boolean isRestart() {
++		return this.restart;
++	}
++
++	public void setRestart(boolean restart) {
++		this.restart = restart;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java
+new file mode 100755
+index 0000000..3ae9068
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java
+@@ -0,0 +1,45 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Overall ODF status.")
++public class ODFStatus {
++
++	@ApiModelProperty(value="Status of the ODF queues", readOnly=true)
++	private MessagingStatus messagingStatus;
++
++	@ApiModelProperty(value="Status of the ODF thread manager", readOnly=true)
++	private List<ThreadStatus> threadManagerStatus;
++
++	public MessagingStatus getMessagingStatus() {
++		return this.messagingStatus;
++	}
++
++	public void setMessagingStatus(MessagingStatus messagingStatus) {
++		this.messagingStatus = messagingStatus;
++	}
++
++	public List<ThreadStatus> getThreadManagerStatus() {
++		return this.threadManagerStatus;
++	}
++
++	public void setThreadManagerStatus(List<ThreadStatus> threadManagerStatus) {
++		this.threadManagerStatus = threadManagerStatus;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java
+new file mode 100755
+index 0000000..d18825b
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="ODF version information.")
++public class ODFVersion {
++
++	@ApiModelProperty(value="Version of the ODF instance", readOnly=true, required=true)
++	private String version;
++
++	public String getVersion() {
++		return this.version;
++	}
++
++	public void setVersion(String version) {
++		this.version = version;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java
+new file mode 100755
+index 0000000..ccaec51
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java
+@@ -0,0 +1,53 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Status of an individual Kafka offset")
++public class PartitionOffsetInfo {
++	@ApiModelProperty(value="Partition id", readOnly=true, required=true)
++	private Integer partitionId;
++
++	@ApiModelProperty(value="Kafka offset identifying the last consumed message within the partition", readOnly=true, required=true)
++	private Long offset;
++
++	@ApiModelProperty(value="Status message", readOnly=true)
++	private String message;
++
++	public Integer getPartitionId() {
++		return partitionId;
++	}
++
++	public void setPartitionId(Integer partitionId) {
++		this.partitionId = partitionId;
++	}
++
++	public Long getOffset() {
++		return offset;
++	}
++
++	public void setOffset(Long offset) {
++		this.offset = offset;
++	}
++
++	public String getMessage() {
++		return message;
++	}
++
++	public void setMessage(String message) {
++		this.message = message;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java
+new file mode 100755
+index 0000000..4f3e871
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java
+@@ -0,0 +1,36 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++public class ServiceRuntimeInfo {
++	private String name;
++	private String description;
++
++	public String getName() {
++		return name;
++	}
++
++	public void setName(String name) {
++		this.name = name;
++	}
++
++	public String getDescription() {
++		return description;
++	}
++
++	public void setDescription(String description) {
++		this.description = description;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java
+new file mode 100755
+index 0000000..a244127
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java
+@@ -0,0 +1,29 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.List;
++
++public class ServiceRuntimesInfo {
++	private List<ServiceRuntimeInfo> runtimes;
++
++	public List<ServiceRuntimeInfo> getRuntimes() {
++		return runtimes;
++	}
++
++	public void setRuntimes(List<ServiceRuntimeInfo> runtimes) {
++		this.runtimes = runtimes;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java
+new file mode 100755
+index 0000000..b6b918b
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java
+@@ -0,0 +1,62 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Overall ODF system health.")
++public class SystemHealth {
++	
++	public static enum HealthStatus {
++		OK, WARNING, ERROR
++	}
++
++	@ApiModelProperty(value="ODF health status", readOnly=true, required=true)
++	private HealthStatus status;
++
++	@ApiModelProperty(value="List of status messages", readOnly=true)
++	private List<String> messages = new ArrayList<>();
++
++	@ApiModelProperty(value="Health status of the individual subsystems", readOnly=true)
++	private List<SystemHealth> subSystemsHealth = new ArrayList<>();
++
++	public HealthStatus getStatus() {
++		return status;
++	}
++
++	public void setStatus(HealthStatus status) {
++		this.status = status;
++	}
++
++	public List<String> getMessages() {
++		return messages;
++	}
++
++	public void setMessages(List<String> messages) {
++		this.messages = messages;
++	}
++
++	public List<SystemHealth> getSubSystemsHealth() {
++		return subSystemsHealth;
++	}
++
++	public void setSubSystemsHealth(List<SystemHealth> subSystemsHealth) {
++		this.subSystemsHealth = subSystemsHealth;
++	}
++
++}
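++
++/* Illustrative sketch (not part of the ODF source): since subsystems report
++ * their own SystemHealth, the overall severity can be derived recursively
++ * (ERROR > WARNING > OK, relying on the enum declaration order):
++ *
++ *   static SystemHealth.HealthStatus worstStatus(SystemHealth health) {
++ *       SystemHealth.HealthStatus worst = health.getStatus();
++ *       for (SystemHealth sub : health.getSubSystemsHealth()) {
++ *           SystemHealth.HealthStatus subWorst = worstStatus(sub);
++ *           if (subWorst.ordinal() > worst.ordinal()) {
++ *               worst = subWorst;
++ *           }
++ *       }
++ *       return worst;
++ *   }
++ */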
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java
+new file mode 100755
+index 0000000..74e939e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java
+@@ -0,0 +1,57 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.engine;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Status of an individual thread of the ODF thread manager")
++public class ThreadStatus {
++
++	public static enum ThreadState { RUNNING, FINISHED, NON_EXISTENT }
++
++	@ApiModelProperty(value="Thread id", readOnly=true)
++	private String id;
++
++	@ApiModelProperty(value="Thread status", readOnly=true)
++	private ThreadState state;
++
++	@ApiModelProperty(value="Thread type", readOnly=true)
++	private String type;
++
++	public String getType() {
++		return type;
++	}
++
++	public void setType(String type) {
++		this.type = type;
++	}
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public ThreadState getState() {
++		return state;
++	}
++
++	public void setState(ThreadState state) {
++		this.state = state;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java
+new file mode 100755
+index 0000000..1f48d0d
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java
+@@ -0,0 +1,31 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++
++/**
++ * Interface for the logic that propagates annotations from the AnnotationStore to the MetadataStore 
++ *
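++ * For illustration only, a minimal propagator sketch is shown below. It assumes that
++ * AnnotationStore exposes a getAnnotations(reference, analysisRequestId) lookup; the
++ * names used here are placeholders, not a confirmed part of the ODF API:
++ * <pre>{@code
++ * public class SimpleAnnotationPropagator implements AnnotationPropagator {
++ *     public void propagateAnnotations(AnnotationStore as, String requestId) {
++ *         for (Annotation annotation : as.getAnnotations(null, requestId)) {
++ *             // placeholder: write each annotation to the target metadata store
++ *         }
++ *     }
++ * }
++ * }</pre>
++ *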
++ */
++public interface AnnotationPropagator {
++
++	/**
++	 * Run the actual propagation process 
++	 * @param as The annotation store from which the annotations should be taken
++	 * @param requestId Propagate only annotations that belong to a specific analysis request id (optional)
++	 * 
++	 */
++	void propagateAnnotations(AnnotationStore as, String requestId);
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java
+new file mode 100755
+index 0000000..849277c
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java
+@@ -0,0 +1,61 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++public class AtlasMetadataQueryBuilder extends MetadataQueryBuilder {
++
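++	// Illustrative only: chaining objectType("Table") and simpleCondition("name", COMPARATOR.EQUALS, "t1")
++	// on this builder yields the Atlas DSL query: from Table where name = 't1'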
++	@Override
++	public String build() {
++		if (this.objectType != null) {
++			StringBuilder query = new StringBuilder("from " + objectType);
++			boolean firstCondition = true;
++			if (this.conditions != null) {
++				for (Condition condition : conditions) {
++					if (condition instanceof SimpleCondition) {
++						SimpleCondition simpleCond = (SimpleCondition) condition;
++						if (firstCondition) {
++							query.append(" where ");
++						} else {
++							query.append(" and ");
++						}
++						query.append(simpleCond.getAttributeName());
++						switch (simpleCond.getComparator()) {
++						case EQUALS:
++							query.append(" = ");
++							break;
++						case NOT_EQUALS:
++							query.append(" != ");
++							break;
++						default:
++							throw new RuntimeException("Comparator " + simpleCond.getComparator() + " is currently not supported");
++						}
++						Object val = simpleCond.getValue();
++						if (val instanceof MetaDataObjectReference) {
++							query.append("'" + ((MetaDataObjectReference) val).getId() + "'");
++						} else if (val instanceof String) {
++							query.append("'" + val.toString() + "'");
++						} else if (val == null) {
++							query.append("null");
++						} else {
++							query.append(val.toString());
++						}
++					}
++					firstCondition = false;
++				}
++			}
++			return query.toString();
++		}
++		return null;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java
+new file mode 100755
+index 0000000..c9e59e7
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java
+@@ -0,0 +1,69 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++public class DefaultMetadataQueryBuilder extends MetadataQueryBuilder {
++
++	public static final String SEPARATOR_STRING = " ";
++	public static final String DATASET_IDENTIFIER = "from";
++	public static final String CONDITION_PREFIX = "where";
++	public static final String AND_IDENTIFIER = "and";
++	public static final String EQUALS_IDENTIFIER = "=";
++	public static final String NOT_EQUALS_IDENTIFIER = "<>";
++	public static final String QUOTE_IDENTIFIER = "'";
++
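++	// Illustrative only: chaining objectType("DataSet") and simpleCondition("name", COMPARATOR.EQUALS, "waldo")
++	// on this builder yields the default query string: from DataSet where name = 'waldo'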
++	@Override
++	public String build() {
++		if (this.objectType != null) {
++			StringBuilder query = new StringBuilder(DATASET_IDENTIFIER + SEPARATOR_STRING + objectType);
++			if (this.conditions != null) {
++				boolean firstCondition = true;
++				for (Condition condition : conditions) {
++					if (condition instanceof SimpleCondition) {
++						SimpleCondition simpleCond = (SimpleCondition) condition;
++						if (firstCondition) {
++							// the first condition is introduced by "where", all further ones by "and"
++							query.append(SEPARATOR_STRING + CONDITION_PREFIX + SEPARATOR_STRING);
++						} else {
++							query.append(SEPARATOR_STRING + AND_IDENTIFIER + SEPARATOR_STRING);
++						}
++						query.append(simpleCond.getAttributeName());
++						switch (simpleCond.getComparator()) {
++						case EQUALS:
++							query.append(SEPARATOR_STRING + EQUALS_IDENTIFIER + SEPARATOR_STRING);
++							break;
++						case NOT_EQUALS:
++							query.append(SEPARATOR_STRING + NOT_EQUALS_IDENTIFIER + SEPARATOR_STRING);
++							break;
++						default:
++							throw new RuntimeException("Comparator " + simpleCond.getComparator() + " is currently not supported");
++						}
++						Object val = simpleCond.getValue();
++						if (val instanceof MetaDataObjectReference) {
++							query.append(QUOTE_IDENTIFIER + ((MetaDataObjectReference) val).getId() + QUOTE_IDENTIFIER);
++						} else if (val instanceof String) {
++							query.append(QUOTE_IDENTIFIER + val.toString() + QUOTE_IDENTIFIER);
++						} else if (val == null) {
++							query.append("null");
++						} else {
++							query.append(val.toString());
++						}
++					}
++					firstCondition = false;
++				}
++			}
++			return query.toString();
++		}
++		return null;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java
+new file mode 100755
+index 0000000..41ad9e1
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java
+@@ -0,0 +1,44 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.Properties;
++
++/**
++ * A common interface for stores that are external to ODF.
++ * Provides connection test methods and basic metadata about the store. 
++ *  
++ *
++ */
++public interface ExternalStore {
++	static enum ConnectionStatus { OK, AUTHORIZATION_FAILED, UNREACHABLE, UNKNOWN_ERROR };
++	
++	static final String STORE_PROPERTY_DESCRIPTION = "STORE_PROPERTY_DESCRIPTION"; 
++	static final String STORE_PROPERTY_TYPE = "STORE_PROPERTY_TYPE"; 
++	static final String STORE_PROPERTY_ID = "STORE_PROPERTY_ID"; 
++	
++	/**
++	 * @return the properties of this metadata object store instance.
++	 * Must return at least STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_TYPE, and STORE_PROPERTY_ID.
++	 */
++	Properties getProperties();
++	
++	/**
++	 * @return the unique repository Id for this metadata store
++	 */
++	String getRepositoryId();
++	
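++	/**
++	 * Test whether the store can be reached with the configured connection information.
++	 *
++	 * @return Status of the connection attempt
++	 */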
++	ConnectionStatus testConnection();
++	
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java
+new file mode 100755
+index 0000000..7eca5cb
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java
+@@ -0,0 +1,88 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++
++/**
++ * Internal metadata utilities
++ * 
++ */
++public class InternalMetaDataUtils {
++	public static final String ODF_PARENT_REFERENCE = "PARENT";
++	public static final String ODF_CHILDREN_REFERENCE = "CHILDREN";
++
++	/**
++	 * Turn a list of metadata objects into a list of references to the corresponding metadata objects
++	 *  
++	 * @param objectList Given list of metadata objects
++	 * @return Resulting list of references to the metadata objects
++	 */
++	public static List<MetaDataObjectReference> getReferenceList(List<MetaDataObject> objectList) {
++		List<MetaDataObjectReference> result = new ArrayList<MetaDataObjectReference>();
++		for (MetaDataObject obj : objectList) {
++			result.add(obj.getReference());
++		}
++		return result;
++	}
++
++	/**
++	 * Convert a list of metadata object references into a list of the corresponding metadata objects
++	 *  
++	 * @param mds Metadata store to retrieve the objects from
++	 * @param referenceList Given list of metadata object references
++	 * @param type Class of the expected metadata objects
++	 * @return Resulting list of metadata objects
++	 */
++	public static <T> List<T>  getObjectList(MetadataStore mds, List<MetaDataObjectReference> referenceList, Class<T> type) {
++		List<T> result = new ArrayList<T>();
++		for (MetaDataObjectReference ref : referenceList) {
++			MetaDataObject obj = mds.retrieve(ref);
++			if (obj != null) {
++				try {
++					result.add(type.cast(obj));
++				} catch(ClassCastException e) {
++					String errorMessage = MessageFormat.format("Metadata object with id ''{0}'' cannot be cast to type ''{1}''.", new Object[] { ref.getId(), type.getName() });
++					throw new MetadataStoreException(errorMessage);
++				}
++			} else {
++				String errorMessage = MessageFormat.format("Metadata object with reference ''{0}'' could not be retrieved from metadata store ''{1}''.", new Object[] { ref, mds.getRepositoryId() });
++				throw new MetadataStoreException(errorMessage);
++			}
++		}
++		return result;
++	}
++
++	/**
++	 * Merge a set of given lists of references to metadata objects into a single, duplicate-free list.
++	 *  
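++	 * For illustration: merging the lists [refA, refB] and [refB, refC] yields [refA, refB, refC],
++	 * because duplicates are collapsed by reference id (refA, refB, and refC are placeholder names).
++	 *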
++	 * @param refListArray Array of given lists of references
++	 * @return Resulting merged list of references
++	 */
++	@SafeVarargs
++	public static List<MetaDataObjectReference> mergeReferenceLists(List<MetaDataObjectReference>... refListArray) {
++		HashMap<String, MetaDataObjectReference> referenceHashMap = new HashMap<String, MetaDataObjectReference>();
++		for (List<MetaDataObjectReference> refList : refListArray) {
++			if (refList != null) {
++				for (MetaDataObjectReference ref : refList) {
++					referenceHashMap.put(ref.getId(), ref);
++				}
++			}
++		}
++		return new ArrayList<MetaDataObjectReference>(referenceHashMap.values());
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java
+new file mode 100755
+index 0000000..e5ebfda
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java
+@@ -0,0 +1,93 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++
++/**
++ * Common base for default metadata store and metadata cache.
++ * 
++ * 
++ */
++public abstract class InternalMetadataStoreBase extends MetadataStoreBase implements MetadataStore {
++
++	protected abstract HashMap<String, StoredMetaDataObject> getObjects();
++
++	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type) {
++		if ((metaDataObject == null) || (metaDataObject.getReference() == null)) {
++			throw new MetadataStoreException("Metadata object or its reference attribute cannot be null.");
++		}
++		List<T> result = new ArrayList<T>();
++		StoredMetaDataObject internalObj = getObjects().get(metaDataObject.getReference().getId());
++		if ((internalObj != null) && (internalObj.getReferenceMap().get(attributeName) != null)) {
++			for (MetaDataObjectReference ref : internalObj.getReferenceMap().get(attributeName)) {
++				MetaDataObject obj = retrieve(ref);
++				if (obj != null) {
++					// Ignore objects that are not available in the metadata store
++					// TODO: Consider using an invalid reference if an object is not available
++					try {
++						result.add(type.cast(obj));
++					} catch(ClassCastException e) {
++						String errorMessage = MessageFormat.format("Inconsistent object reference: A reference of type ''{0}'' cannot be cast to type ''{1}''.", new Object[] { attributeName, type.getName() });
++						throw new MetadataStoreException(errorMessage);
++					}
++				}
++			}
++		}
++		return result;
++	}
++
++	abstract protected Object getAccessLock();
++
++	@Override
++	public MetaDataObject getParent(MetaDataObject metaDataObject) {
++		List<MetaDataObject> parentList = new ArrayList<MetaDataObject>();
++		// TODO: Make this more efficient
++		for (StoredMetaDataObject internalMdo : getObjects().values()) {
++			for (MetaDataObject child : getChildren(internalMdo.getMetaDataObject())) {
++				if (child.getReference().getId().equals(metaDataObject.getReference().getId())) {
++					parentList.add(internalMdo.getMetaDataObject());
++				}
++			}
++		}
++		if (parentList.size() == 1) {
++			return parentList.get(0);
++		} else if (parentList.size() == 0) {
++			return null;
++		}
++		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more that one parent object.", metaDataObject.getReference().getId());
++		throw new MetadataStoreException(errorMessage);
++	}
++
++	@Override
++	public MetaDataObject retrieve(MetaDataObjectReference reference) {
++		synchronized(getAccessLock()) {
++			String objectId = reference.getId();
++			if (getObjects().containsKey(objectId)) {
++				return getObjects().get(objectId).getMetaDataObject();
++			}
++			return null;
++		}
++	}
++
++	@Override
++	public MetadataQueryBuilder newQueryBuilder() {
++		return new DefaultMetadataQueryBuilder();
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java
+new file mode 100755
+index 0000000..d112720
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java
+@@ -0,0 +1,77 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.ArrayList;
++import java.util.List;
++
++/**
++ * Helper class to handle "invalid" references.
++ * 
++ * Invalid references are typically returned by the metadata store implementation to indicate that a reference (or a reference list) was not provided.
++ * This could be the case, e.g., for performance reasons when finding a reference might be time consuming. 
++ * In such a case the application should explicitly use the MetadataQueryBuilder to get to the reference (list).
++ * 
++ * Clients should use {@link #isInvalidRef(MetaDataObjectReference)} and {@link #isInvalidRefList(List)}
++ * to check whether a reference (or reference list) in a retrieved MetaDataObject is invalid.
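++ *
++ * For illustration, a defensive client check might look like this ("someReference" stands for
++ * any reference taken from a retrieved MetaDataObject):
++ * <pre>{@code
++ * if (InvalidReference.isInvalidRef(someReference)) {
++ *     // fall back to an explicit metadata query instead of following the reference
++ * }
++ * }</pre>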
++ * 
++ * 
++ */
++public class InvalidReference  {
++	
++	public static final String INVALID_METADATAOBJECT_REFERENCE_ID = "INVALID_METADATAOBJECT_REFERENCE_ID";
++	public static final String INVALID_METADATAOBJECT_REFERENCE_LIST_ID = "INVALID_METADATAOBJECT_REFERENCE_LIST_ID";
++	
++	/**
++	 * use this method to indicate that a reference is invalid.
++	 */
++	public static MetaDataObjectReference createInvalidReference(String repositoryId) {
++		MetaDataObjectReference invalidRef = new MetaDataObjectReference();
++		invalidRef.setRepositoryId(repositoryId);
++		invalidRef.setId(INVALID_METADATAOBJECT_REFERENCE_ID);
++		return invalidRef;
++	}
++	
++	public static boolean isInvalidRef(MetaDataObjectReference ref) {
++		if (ref == null) {
++			return false;
++		}
++		return INVALID_METADATAOBJECT_REFERENCE_ID.equals(ref.getId());
++	}
++	
++	
++	/**
++	 * use this method to indicate that a list of references is invalid.
++	 */
++	public static List<MetaDataObjectReference> createInvalidReferenceList(String repositoryId) {
++		List<MetaDataObjectReference> invalidRefList = new ArrayList<>();
++		MetaDataObjectReference invalidRefMarker = new MetaDataObjectReference();
++		invalidRefMarker.setRepositoryId(repositoryId);
++		invalidRefMarker.setId(INVALID_METADATAOBJECT_REFERENCE_LIST_ID);
++		invalidRefList.add(invalidRefMarker);
++		return invalidRefList;
++	}
++	
++	public static boolean isInvalidRefList(List<MetaDataObjectReference> refList) {
++		if (refList == null || refList.size() != 1) {
++			return false;
++		}
++		MetaDataObjectReference ref = refList.get(0);
++		if (ref == null) {
++			return false;
++		}
++		return INVALID_METADATAOBJECT_REFERENCE_LIST_ID.equals(ref.getId());
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java
+new file mode 100755
+index 0000000..de61568
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java
+@@ -0,0 +1,100 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import com.fasterxml.jackson.annotation.JsonIgnore;
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * This class describes the location of a MetaDataObject
++ *
++ */
++@ApiModel(description="Reference to a metadata object.")
++public class MetaDataObjectReference {
++	@ApiModelProperty(value="Unique id of the object", required=true)
++	private String id;
++
++	@ApiModelProperty(value="Id of the metadata repository where the object is registered", required=true)
++	private String repositoryId;
++
++	@ApiModelProperty(value="URL of the object in the metadata repository", required=true)
++	private String url;
++
++	@JsonIgnore
++	private ReferenceCache cache;
++
++	public String getId() {
++		return id;
++	}
++
++	public void setId(String id) {
++		this.id = id;
++	}
++
++	public boolean equals(Object other) {
++		if (other == null) {
++			return false;
++		}
++		if (!(other instanceof MetaDataObjectReference)) {
++			return false;
++		}
++		MetaDataObjectReference otherMDO = (MetaDataObjectReference) other;
++		if (!this.id.equals(otherMDO.id)) {
++			return false;
++		}
++		if (this.repositoryId == null) {
++			return otherMDO.repositoryId == null;
++		}
++		return this.repositoryId.equals(otherMDO.repositoryId);
++	}
++
++	public int hashCode() {
++		int result = 0;
++		if (this.repositoryId != null) {
++			result = repositoryId.hashCode();
++		}
++		return result + this.id.hashCode();
++	}
++
++	public String toString() {
++		return this.repositoryId + "|||" + this.id;
++	}
++
++	public String getRepositoryId() {
++		return repositoryId;
++	}
++
++	public void setRepositoryId(String repositoryId) {
++		this.repositoryId = repositoryId;
++	}
++
++	public String getUrl() {
++		return url;
++	}
++
++	public void setUrl(String url) {
++		this.url = url;
++	}
++
++	public ReferenceCache getCache() {
++		return cache;
++	}
++
++	public void setCache(ReferenceCache cache) {
++		this.cache = cache;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java
+new file mode 100755
+index 0000000..643f203
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java
+@@ -0,0 +1,92 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.ArrayList;
++import java.util.List;
++
++/**
++ * Abstract base class for a builder that can be used to create metadata queries.
++ * It uses the Java builder pattern.
++ * 
++ * There are two types of methods:
++ * 1. Chainable methods that can be used to do simple filtering, e.g.,
++ *       {@code String query = queryBuilder.objectType("DataSet").simpleCondition("name", COMPARATOR.EQUALS, "waldo").build();}
++ * 2. Predefined queries that are not chainable. These are very specific queries that currently cannot be built with the chainable methods, e.g.,
++ *       {@code String query = queryBuilder.connectionsForDataSet(dataSetId).build();}
++ * 
++ * When subclassing, note that the methods set the appropriate protected fields to null to indicate that the query was "overwritten". 
++ * 
++ * @see MetadataStore
++ */
++public abstract class MetadataQueryBuilder {
++
++	public static enum COMPARATOR {
++		EQUALS, NOT_EQUALS
++	};
++
++	protected static class Condition {
++	};
++
++	protected static class SimpleCondition extends Condition {
++		public SimpleCondition(String attributeName, COMPARATOR comparator, Object value) {
++			super();
++			this.attributeName = attributeName;
++			this.comparator = comparator;
++			this.value = value;
++		}
++
++		private String attributeName;
++		private COMPARATOR comparator;
++		private Object value;
++
++		public String getAttributeName() {
++			return attributeName;
++		}
++
++		public COMPARATOR getComparator() {
++			return comparator;
++		}
++
++		public Object getValue() {
++			return value;
++		}
++
++	}
++
++	protected String objectType;
++	protected List<Condition> conditions;
++
++	public abstract String build();
++
++	/**
++	 * Set the type of object to be queried. The type names are those of the common model (e.g. Table, Column, etc.).
++	 */
++	public MetadataQueryBuilder objectType(String objectTypeName) {
++		this.objectType = objectTypeName;
++		return this;
++	}
++
++	/**
++	 * Add a simple condition to the query. All conditions are "ANDed".
++	 */
++	public MetadataQueryBuilder simpleCondition(String attributeName, COMPARATOR comparator, Object value) {
++		if (conditions == null) {
++			conditions = new ArrayList<>();
++		}
++		conditions.add(new SimpleCondition(attributeName, comparator, value));
++		return this;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java
+new file mode 100755
+index 0000000..7a50ced
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java
+@@ -0,0 +1,173 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++
++/**
++ * Interface to be implemented by a metadata store in order to be used with ODF.
++ * 
++ * In addition to this interface, each ODF metadata store must support the ODF base types defined by the
++ * {@link WritableMetadataStoreUtils#getBaseTypes} method.
++ *
++ */
++public interface MetadataStore extends ExternalStore {
++
++	/**
++	 * Retrieve information required to access the actual data behind an information asset, e.g. the connection info
++	 * to retrieve the data in a JDBC table.
++	 *  
++	 * @param informationAsset Given information asset
++	 * @return Connection information required for data access
++	 */
++	ConnectionInfo getConnectionInfo(MetaDataObject informationAsset);
++
++	/**
++	 * Retrieve a metadata object by its metadata object reference.
++	 *  
++	 * @param reference Metadata object reference
++	 * @return Metadata object
++	 */
++	MetaDataObject retrieve(MetaDataObjectReference reference);
++	
++	/**
++	 * Perform a search against the metadata store. The query should be generated using the {@link MetadataQueryBuilder}
++	 * returned by the {@link #newQueryBuilder()} method.
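++	 * For illustration (the object type "Table" and the name "CUSTOMERS" are placeholders):
++	 * <pre>{@code
++	 * String query = mds.newQueryBuilder().objectType("Table")
++	 *     .simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "CUSTOMERS").build();
++	 * List<MetaDataObjectReference> tableRefs = mds.search(query);
++	 * }</pre>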
++
++	 * @param query Query string
++	 * @return List of references to metadata objects found by the query
++	 */
++	List<MetaDataObjectReference> search(String query);
++	
++	/**
++	 * Populates the metadata store with example datasets. This method is optional; however, in order to support the ODF
++	 * integration tests, it must create the objects returned by the {@link WritableMetadataStoreUtils#getSampleDataObjects}
++	 * method.
++	 * 
++	 */
++	void createSampleData();
++
++	/**
++	 * Deletes all data from this repository. This method is optional; however, it must be implemented in order to support the ODF
++	 * integration tests.
++	 * 
++	 */
++	void resetAllData();
++	
++	MetadataQueryBuilder newQueryBuilder();
++	
++	/**
++	 * Return an implementation of the {@link AnnotationPropagator} interface that propagates ODF annotations into the metadata store.
++	 * The method may return null if the metadata store does not support annotation propagation.
++	 * 
++	 * @return The AnnotationPropagator for this MetadataStore.
++	 */
++	AnnotationPropagator getAnnotationPropagator();
++
++	/**
++	 * Retrieve references of a specific type from an object stored in the metadata store.
++	 * A list of available reference types can be retrieved with the {@link #getReferenceTypes() getReferenceTypes} method.
++	 *  
++	 * @param metaDataObject Given metadata object to retrieve the references from
++	 * @param attributeName Name of the reference
++	 * @return List of objects referenced by the given metadata object
++	 */
++	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject);
++
++	/**
++	 * Return the list of available reference types supported by the {@link #getReferences(String, MetaDataObject) getReferences} method of the metadata store.
++	 * The list indicates which reference types are added to the internal metadata cache when a discovery service is called. That way, they will be available
++	 * to the service at runtime even if the service has no access to the metadata store.
++	 *  
++	 * @return List of supported reference types 
++	 */
++	public List<String> getReferenceTypes();
++
++	/**
++	 * Retrieve the parent object of a given object stored in the metadata store.
++	 *  
++	 * @param metaDataObject Given metadata object
++	 * @return Parent object of the metadata object
++	 */
++	public MetaDataObject getParent(MetaDataObject metaDataObject);
++
++	/**
++	 * Retrieve the child objects of a given object stored in the metadata store.
++	 *  
++	 * @param metaDataObject Given metadata object
++	 * @return List of child objects referenced by the given metadata object
++	 */
++	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject);
++
++	/**
++	 * Retrieve data file objects referenced by a data file folder object.
++	 *  
++	 * @param folder Given data file folder
++	 * @return List of data file objects
++	 */
++	public List<DataFile> getDataFiles(DataFileFolder folder);
++
++	/**
++	 * Retrieve data file folder objects referenced by a data file folder object.
++	 *  
++	 * @param folder Given data file folder
++	 * @return List of data file folder objects
++	 */
++	public List<DataFileFolder> getDataFileFolders(DataFileFolder folder);
++
++	/**
++	 * Retrieve schema objects referenced by a database object.
++	 *  
++	 * @param database Given database
++	 * @return List of schema objects
++	 */
++	public List<Schema> getSchemas(Database database);
++
++	/**
++	 * Retrieve table objects referenced by a schema object.
++	 *  
++	 * @param schema Given schema
++	 * @return List of table objects
++	 */
++	public List<Table> getTables(Schema schema);
++
++	/**
++	 * Retrieve column objects referenced by a table object.
++	 *  
++	 * @param relationalDataSet Given relational data set (e.g. table)
++	 * @return List of column objects
++	 */
++	public List<Column> getColumns(RelationalDataSet relationalDataSet);
++
++	/**
++	 * Retrieve connection objects referenced by a data store object.
++	 *  
++	 * @param dataStore Given data store
++	 * @return List of connection objects
++	 */
++	public List<Connection> getConnections(DataStore dataStore);
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java
+new file mode 100755
+index 0000000..9ad68bf
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java
+@@ -0,0 +1,111 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++
++/**
++ * Common base that may be used for any metadata store implementation.
++ * 
++ * 
++ */
++public abstract class MetadataStoreBase implements MetadataStore {
++
++	public static final String ODF_CONNECTIONS_REFERENCE = "CONNECTIONS";
++	public static final String ODF_COLUMNS_REFERENCE = "COLUMNS";
++	public static final String ODF_DATAFILEFOLDERS_REFERENCE = "DATAFILEFOLDERS";
++	public static final String ODF_DATAFILES_REFERENCE = "DATAFILES";
++	public static final String ODF_SCHEMAS_REFERENCE = "SCHEMAS";
++	public static final String ODF_TABLES_REFERENCE = "TABLES";
++
++	protected abstract <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type);
++
++	@Override
++	public List<String> getReferenceTypes() {
++		List<String> result = new ArrayList<String>();
++		result.add(ODF_CONNECTIONS_REFERENCE);
++		result.add(ODF_COLUMNS_REFERENCE);
++		result.add(ODF_DATAFILEFOLDERS_REFERENCE);
++		result.add(ODF_DATAFILES_REFERENCE);
++		result.add(ODF_SCHEMAS_REFERENCE);
++		result.add(ODF_TABLES_REFERENCE);
++		return result;
++	}
++
++	@Override
++	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject) {
++		return getReferences(attributeName, metaDataObject, MetaDataObject.class);
++	}
++
++	@Override
++	public List<DataFile> getDataFiles(DataFileFolder folder) {
++		return getReferences(ODF_DATAFILES_REFERENCE, folder, DataFile.class);
++	}
++
++	@Override
++	public List<DataFileFolder> getDataFileFolders(DataFileFolder folder) {
++		return getReferences(ODF_DATAFILEFOLDERS_REFERENCE, folder, DataFileFolder.class);
++	}
++
++	@Override
++	public List<Schema> getSchemas(Database database) {
++		return getReferences(ODF_SCHEMAS_REFERENCE, database, Schema.class);
++	}
++
++	@Override
++	public List<Table> getTables(Schema schema) {
++		return getReferences(ODF_TABLES_REFERENCE, schema, Table.class);
++	}
++
++	@Override
++	public List<Column> getColumns(RelationalDataSet relationalDataSet) {
++		return getReferences(ODF_COLUMNS_REFERENCE, relationalDataSet, Column.class);
++	}
++
++	@Override
++	public List<Connection> getConnections(DataStore dataStore) {
++		return getReferences(ODF_CONNECTIONS_REFERENCE, dataStore, Connection.class);
++	}
++
++	@Override
++	public ConnectionStatus testConnection() {
++		return ConnectionStatus.OK;
++	}
++
++	@Override
++	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject) {
++		List<MetaDataObject> result = new ArrayList<MetaDataObject>();
++		for (String referenceType : getReferenceTypes()) {
++			for (MetaDataObject ref : getReferences(referenceType, metaDataObject, MetaDataObject.class)) {
++				if (!result.contains(ref)) {
++					result.add(ref);
++				}
++			}
++		}
++		return result;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java
+new file mode 100755
+index 0000000..7c84a61
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java
+@@ -0,0 +1,36 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++public class MetadataStoreException extends RuntimeException {
++
++	private static final long serialVersionUID = -8509622412001869582L;
++
++	public MetadataStoreException() {
++		super();
++	}
++
++	public MetadataStoreException(String message, Throwable cause) {
++		super(message, cause);
++	}
++
++	public MetadataStoreException(String message) {
++		super(message);
++	}
++
++	public MetadataStoreException(Throwable cause) {
++		super(cause);
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java
+new file mode 100755
+index 0000000..5601614
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java
+@@ -0,0 +1,51 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.connectivity.RESTClientManager;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++
++public class RESTMetadataStoreHelper {
++
++	static Logger logger = Logger.getLogger(RESTMetadataStoreHelper.class.getName());
++
++	/**
++	 * Return a ConnectionStatus object assuming that the URI is static in the sense that
++	 * the metadata store is unreachable if the URI cannot be reached.
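++	 *
++	 * For illustration (URL and credentials are placeholders):
++	 * <pre>{@code
++	 * RESTClientManager client = new RESTClientManager(new URI("https://odf.example.com"), "user", "password");
++	 * MetadataStore.ConnectionStatus status =
++	 *     RESTMetadataStoreHelper.testConnectionForStaticURL(client, "https://odf.example.com");
++	 * }</pre>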
++	 */
++	public static MetadataStore.ConnectionStatus testConnectionForStaticURL(RESTClientManager client, String uri) {
++		try {
++			Response resp = client.getAuthenticatedExecutor().execute(Request.Get(uri));
++			HttpResponse httpResponse = resp.returnResponse();
++			switch (httpResponse.getStatusLine().getStatusCode()) {
++			case HttpStatus.SC_NOT_FOUND:
++				return MetadataStore.ConnectionStatus.UNREACHABLE;
++			case HttpStatus.SC_OK:
++				return MetadataStore.ConnectionStatus.OK;
++			default:
++				// any other status code is treated as an unknown error below
++				break;
++			}
++		} catch (Exception e) {
++			logger.log(Level.INFO, "Connection failed", e);
++		}
++		return MetadataStore.ConnectionStatus.UNKNOWN_ERROR;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java
+new file mode 100755
+index 0000000..d48b6fe
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java
+@@ -0,0 +1,54 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++
++/**
++ * This class is used to cache the materialized version of a metadata reference, in order to reduce the number of retrievals required
++ *
++ */
++public class ReferenceCache {
++
++	private Annotation annotation;
++	private Column oMColumn;
++	private DataSet oMDataSet;
++
++	public Column getColumn() {
++		return oMColumn;
++	}
++
++	public void setColumn(Column oMColumn) {
++		this.oMColumn = oMColumn;
++	}
++
++	public DataSet getDataSet() {
++		return oMDataSet;
++	}
++
++	public void setDataSet(DataSet oMDataSet) {
++		this.oMDataSet = oMDataSet;
++	}
++
++	public Annotation getAnnotation() {
++		return annotation;
++	}
++
++	public void setAnnotation(Annotation annotation) {
++		this.annotation = annotation;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java
+new file mode 100755
+index 0000000..3567c1d
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java
+@@ -0,0 +1,385 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.net.URI;
++import java.net.URISyntaxException;
++import java.net.URLEncoder;
++import java.security.GeneralSecurityException;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Hashtable;
++import java.util.List;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.StatusLine;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.utils.URIBuilder;
++import org.apache.wink.json4j.JSON;
++import org.apache.wink.json4j.JSONArray;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.connectivity.RESTClientManager;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.json.JSONUtils;
++
++// TODO properly escape all URLs when constructed as string concatenation
++
++/**
++ * 
++ * A MetadataStore to access metadata via an ODF instance
++ *
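++ * For illustration, a remote store might be used like this (URL, credentials, and
++ * someReference are placeholders):
++ * <pre>{@code
++ * MetadataStore mds = new RemoteMetadataStore("https://odf.example.com:58081", "odfuser", "password", true);
++ * MetaDataObject mdo = mds.retrieve(someReference);
++ * }</pre>
++ *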
++ */
++public class RemoteMetadataStore extends MetadataStoreBase implements MetadataStore {
++	private Logger logger = Logger.getLogger(RemoteMetadataStore.class.getName());
++
++	private String odfUrl;
++	private String odfUser;
++
++	private Properties mdsProps = null;
++	
++	// if this is true, null repository Ids are ok for all MetaDataObjectReference objects
++	private boolean isDefaultStore = true;
++
++	private RESTClientManager restClient;
++
++	static String ODF_API_INFIX = "/odf/api/v1";
++
++	private void constructThis(String odfUrl, String odfUser, String odfPassword, boolean isDefaultStore) throws URISyntaxException {
++		this.odfUrl = odfUrl;
++		this.odfUser = odfUser;
++		this.restClient = new RESTClientManager(new URI(odfUrl), odfUser, odfPassword);
++		this.isDefaultStore = isDefaultStore;
++	}
++
++	public RemoteMetadataStore(String odfUrl, String odfUser, String odfPassword, boolean isDefaultStore) throws URISyntaxException, MetadataStoreException {
++		constructThis(odfUrl, odfUser, odfPassword, isDefaultStore);
++	}
++
++	/**
++	 * check if the reference belongs to this repository. Throw exception if not.
++	 */
++	void checkReference(MetaDataObjectReference reference) {
++		if (reference == null) {
++			throw new MetadataStoreException("Reference cannot be null");
++		}
++		if (reference.getRepositoryId() == null) {
++			if (!isDefaultStore) {
++				throw new MetadataStoreException("Repository ID is not set on the reference.");
++			}
++		} else {
++			if (!reference.getRepositoryId().equals(this.getRepositoryId())) {
++				throw new MetadataStoreException(MessageFormat.format("Repository ID ''{0}'' of reference does not match the one of this repository ''{1}''",
++						new Object[] { reference.getRepositoryId(), getRepositoryId() }));
++			}
++		}
++	}
++	
++	/**
++	 * check if the ODF metadata API can be reached. Throw exception if not.
++	 */
++	private void checkConnectionToMetadataAPI() {
++		MetadataStore.ConnectionStatus connStatus = testConnection();
++		if (connStatus.equals(MetadataStore.ConnectionStatus.UNREACHABLE)) {
++			throw new MetadataStoreException("Internal API for metadata store cannot be reached. Make sure that the discovery service has access to the following URL: " + odfUrl);
++		} else if (connStatus.equals(MetadataStore.ConnectionStatus.AUTHORIZATION_FAILED)) {
++			String messageDetail ="";
++			if (this.odfUser.isEmpty()) {
++				messageDetail = " Make sure to connect to the discovery service securely through https.";
++				//Note that ODF user id and password are only provided if the connection to the service is secure
++			}
++			throw new MetadataStoreException("Autorization failure when accessing API of internal metadata store." + messageDetail);
++		}
++	}
++
++	@Override
++	public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
++		throw new UnsupportedOperationException("This method is not available in the remote implementation of the Metadata store.");
++	}
++
++	@Override
++	public MetaDataObject retrieve(MetaDataObjectReference reference) {
++		checkReference(reference);
++		checkConnectionToMetadataAPI();
++		try {
++			String resource = odfUrl + ODF_API_INFIX + "/metadata/asset/" + URLEncoder.encode(JSONUtils.toJSON(reference), "UTF-8");
++			logger.log(Level.FINEST, "Object reference to be retrieved ''{0}''.", reference.toString());
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code == HttpStatus.SC_NOT_FOUND) {
++				return null;
++			}
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Retrieval of object ''{0}'' failed: HTTP request status: ''{1}'', {2}",
++						new Object[] { JSONUtils.toJSON(reference), statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			} else {
++				JSONObject mdo = (JSONObject) JSON.parse(httpResponse.getEntity().getContent());
++				mdo.remove("annotations");
++				MetaDataObject result = JSONUtils.fromJSON(mdo.write(), MetaDataObject.class);
++				if (result.getReference() == null) {
++					// An empty JSON document indicates that the result should be null.
++					result = null;
++				}
++				logger.log(Level.FINEST, "Retrieved metadata object: ''{0}''.", result);
++				return result;
++			}
++		} catch (GeneralSecurityException | IOException | JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception ocurred while connecting the metadata store", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++	
++	@Override
++	public Properties getProperties() {
++		if (this.mdsProps != null) {
++			return this.mdsProps; 
++		} else {
++			checkConnectionToMetadataAPI();
++			try {
++				String resource = odfUrl + ODF_API_INFIX + "/metadata";
++				Executor executor = this.restClient.getAuthenticatedExecutor();
++				HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
++				StatusLine statusLine = httpResponse.getStatusLine();
++				int code = statusLine.getStatusCode();
++				InputStream is = httpResponse.getEntity().getContent();
++				String response = JSONUtils.getInputStreamAsString(is, "UTF-8");
++				is.close();
++				if (code != HttpStatus.SC_OK) {
++					String msg = MessageFormat.format("Retrieval of metadata store properties at ''{3}'' failed: HTTP request status: ''{0}'', {1}, details: {2}",
++							new Object[] { code, statusLine.getReasonPhrase(), response,  resource});
++					throw new MetadataStoreException(msg);
++				} else {
++					this.mdsProps = new Properties();
++					JSONObject jo = new JSONObject(response);
++					for (Object key : jo.keySet()) {
++						this.mdsProps.put((String) key, (String) jo.get(key));
++					}
++					return this.mdsProps;
++				}
++			} catch (GeneralSecurityException | IOException | JSONException exc) {
++				logger.log(Level.WARNING, "An unexpected exception ocurred while connecting the metadata store", exc);
++				throw new MetadataStoreException(exc);
++			}			
++		}
++	}
++
++	@Override
++	public List<MetaDataObjectReference> search(String query) {
++		checkConnectionToMetadataAPI();
++		try {
++			logger.log(Level.FINE, "Metadata search term: ''{0}''.", query);
++			URIBuilder uri = new URIBuilder(odfUrl + ODF_API_INFIX + "/metadata/search")
++					.addParameter("query", query)
++					.addParameter("resulttype", "references");
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Get(uri.build())).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				throw new MetadataStoreException("Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
++			}
++			InputStream is = httpResponse.getEntity().getContent();
++			JSONArray objReferencesJson = new JSONArray(is);
++			is.close();
++			logger.log(Level.FINEST, "Metadata search response: ''{0}''.", objReferencesJson.write());
++			List<MetaDataObjectReference> resultMDORs = new ArrayList<>();
++			for (Object ref : objReferencesJson) {
++				MetaDataObjectReference objRef = JSONUtils.fromJSON(((JSONObject) ref).write(), MetaDataObjectReference.class);
++				resultMDORs.add(objRef);
++			}			
++			return resultMDORs;
++		} catch (GeneralSecurityException | IOException | URISyntaxException | JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception ocurred while connecting to the metadata store.", exc);
++			throw new MetadataStoreException(exc);
++		}
++
++	}
++
++	@Override
++	public String getRepositoryId() {
++		Hashtable<Object, Object> mdsProps = (Hashtable<Object, Object>) this.getProperties();
++		if (mdsProps.get(STORE_PROPERTY_ID) != null) {
++			return (String) mdsProps.get(STORE_PROPERTY_ID);
++		} else {
++			throw new MetadataStoreException("Property " + STORE_PROPERTY_ID + " is missing from metadata store properties ''" + mdsProps.toString() + "''.");
++		}
++	}
++
++	@Override
++	public MetadataStore.ConnectionStatus testConnection() {
++		return RESTMetadataStoreHelper.testConnectionForStaticURL(restClient, odfUrl);
++	}
++
++	@Override
++	public void createSampleData() {
++		checkConnectionToMetadataAPI();
++		try {
++			String resource = odfUrl + ODF_API_INFIX + "/metadata/sampledata";
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Create sample data failed: HTTP request status: ''{1}'', {2}",
++						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			}
++		} catch (GeneralSecurityException | IOException exc) {
++			logger.log(Level.WARNING, "An unexpected exception ocurred while connecting the metadata store", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	@Override
++	public void resetAllData() {
++		checkConnectionToMetadataAPI();
++		try {
++			String resource = odfUrl + ODF_API_INFIX + "/metadata/resetalldata";
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Post(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Reset all data failed: HTTP request status: ''{1}'', {2}",
++						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			}
++		} catch (GeneralSecurityException | IOException exc) {
++			logger.log(Level.WARNING, "An unexpected exception ocurred while connecting the metadata store", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	@Override
++	public MetadataQueryBuilder newQueryBuilder() {
++		String repoType = getProperties().getProperty(STORE_PROPERTY_TYPE);
++		if ("atlas".equals(repoType)) {
++			return new AtlasMetadataQueryBuilder();
++		} else if ("default".equals(repoType)) {
++			return new DefaultMetadataQueryBuilder();
++		}
++		throw new RuntimeException(MessageFormat.format("No query builder exists for the repository type ''{0}''", repoType));
++	}
++
++	@Override
++	public AnnotationPropagator getAnnotationPropagator() {
++		throw new UnsupportedOperationException("This method is not available in the remote implementation of the Metadata store.");
++	}
++
++	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type){
++		String objectId = metaDataObject.getReference().getId();
++		checkConnectionToMetadataAPI();
++		try {
++			String resource = odfUrl + ODF_API_INFIX + "/metadata/asset/"
++				+ URLEncoder.encode(JSONUtils.toJSON(metaDataObject.getReference()), "UTF-8")
++				+ "/" + URLEncoder.encode(attributeName.toLowerCase(), "UTF-8");
++			logger.log(Level.FINEST, "Retrieving references of type ''{0}'' from metadata object id ''{1}''.", new Object[] { attributeName, objectId });
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code == HttpStatus.SC_NOT_FOUND) {
++				return null;
++			}
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Retrieving references of type ''{0}'' of object id ''{1}'' failed: HTTP request status: ''{2}'', {3}",
++						new Object[] { attributeName, objectId, statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			} else {
++				InputStream is = httpResponse.getEntity().getContent();
++				JSONArray objReferencesJson = new JSONArray(is);
++				is.close();
++				logger.log(Level.FINEST, "Get references response: ''{0}''.", objReferencesJson.write());
++				List<T> referencedObjects = new ArrayList<T>();
++				for (Object ref : objReferencesJson) {
++					T obj = JSONUtils.fromJSON(((JSONObject) ref).write(), type);
++					referencedObjects.add(obj);
++				}
++				return referencedObjects;
++			}
++		} catch (GeneralSecurityException | IOException | JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	@Override
++	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject){
++		return getReferences(attributeName, metaDataObject, MetaDataObject.class);
++	}
++
++	@Override
++	public List<String> getReferenceTypes(){
++		checkConnectionToMetadataAPI();
++		try {
++			String resource = odfUrl + ODF_API_INFIX + "/metadata/referencetypes";
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code == HttpStatus.SC_NOT_FOUND) {
++				return null;
++			}
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Retrieving reference type names failed: HTTP request status: ''{0}'', {1}",
++						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			} else {
++				InputStream is = httpResponse.getEntity().getContent();
++				JSONArray objReferencesJson = new JSONArray(is);
++				is.close();
++				logger.log(Level.FINEST, "Get reference types response: ''{0}''.", objReferencesJson.write());
++				List<String> referenceTypeNames = new ArrayList<String>();
++				for (Object ref : objReferencesJson) {
++					String obj = JSONUtils.fromJSON(((JSONObject) ref).write(), String.class);
++					referenceTypeNames.add(obj);
++				}
++				return referenceTypeNames;
++			}
++		} catch (GeneralSecurityException | IOException | JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	@Override
++	public MetaDataObject getParent(MetaDataObject metaDataObject){
++		List<MetaDataObject> parentList = getReferences(InternalMetaDataUtils.ODF_PARENT_REFERENCE, metaDataObject, MetaDataObject.class);
++		if (parentList.size() == 1) {
++			return parentList.get(0);
++		} else if (parentList.size() == 0) {
++			return null;
++		}
++		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more than one parent object.", metaDataObject.getReference().getId());
++		throw new MetadataStoreException(errorMessage);
++	}
++
++	@Override
++	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject){
++		return getReferences(InternalMetaDataUtils.ODF_CHILDREN_REFERENCE, metaDataObject, MetaDataObject.class);
++	}
++
++}
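++// Usage sketch (illustrative only): traversing objects through the generic
++// MetadataStore interface implemented above. The variables "mds" and "table"
++// are hypothetical.
++//
++//   MetadataStore mds = ...; // e.g. an instance of this remote implementation
++//   MetaDataObject parent = mds.getParent(table);
++//   for (MetaDataObject child : mds.getChildren(table)) {
++//       System.out.println(child.getName());
++//   }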
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java
+new file mode 100755
+index 0000000..5de5f12
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java
+@@ -0,0 +1,61 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import java.util.HashMap;
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * Internal representation of a metadata object as used by the MetaDataCache.
++ * In addition to the object itself, this class contains all references of the object.
++ *
++ */
++@ApiModel(description="Internal representation of a metadata object in the metadata cache.")
++public class StoredMetaDataObject {
++	@ApiModelProperty(value="Actual cached metadata object", readOnly=false, required=true)
++	private MetaDataObject metaDataObject;
++
++	@ApiModelProperty(value="Map of all references of the cached metadata object containing one reference list for each type of reference", readOnly=false, required=true)
++	private HashMap<String, List<MetaDataObjectReference>> referenceMap;
++
++	public void setMetaDataObject(MetaDataObject metaDataObject) {
++		this.metaDataObject = metaDataObject;
++	}
++
++	public MetaDataObject getMetaDataObject() {
++		return this.metaDataObject;
++	}
++
++	public StoredMetaDataObject() {
++	}
++
++	public StoredMetaDataObject(MetaDataObject metaDataObject) {
++		this.metaDataObject = metaDataObject;
++		this.referenceMap = new HashMap<String, List<MetaDataObjectReference>>();
++	}
++
++	public void setReferencesMap(HashMap<String, List<MetaDataObjectReference>> referenceMap) {
++		this.referenceMap = referenceMap;
++	}
++
++	public HashMap<String, List<MetaDataObjectReference>> getReferenceMap() {
++		return this.referenceMap;
++	}
++}
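++// Usage sketch (illustrative only): wrapping a metadata object together with
++// its references, as done when populating the metadata cache. "table",
++// "columnRefs" and the reference type name "COLUMNS" are hypothetical.
++//
++//   StoredMetaDataObject stored = new StoredMetaDataObject(table);
++//   stored.getReferenceMap().put("COLUMNS", columnRefs);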
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java
+new file mode 100755
+index 0000000..a6de68e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java
+@@ -0,0 +1,22 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownMetaDataObject extends MetaDataObject {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java
+new file mode 100755
+index 0000000..feaef91
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.importer;
++
++import java.util.List;
++
++public class JDBCMetadataImportResult {
++	private String databaseName;
++	private List<String> tableNames;
++	private String dbRef;
++	
++	public JDBCMetadataImportResult(String databaseName, String dbId, List<String> tableNames) {
++		super();
++		this.databaseName = databaseName;
++		this.tableNames = tableNames;
++		this.dbRef = dbId;
++	}
++	
++	public String getDBId() {
++		return this.dbRef;
++	}
++
++	public String getDatabaseName() {
++		return databaseName;
++	}
++
++	public List<String> getTableNames() {
++		return tableNames;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java
+new file mode 100755
+index 0000000..2127ce6
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java
+@@ -0,0 +1,36 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.importer;
++
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++
++/**
++ * Interface of the utility that imports metadata from JDBC data sources into the ODF metadata store.
++ * 
++ */
++public interface JDBCMetadataImporter {
++	
++	/**
++	 * Import metadata of one or multiple relational tables into the ODF metadata store, along with the corresponding
++	 * database and connection information.
++	 * 
++	 * @param connection Connection to the JDBC data source
++	 * @param dbName Database name
++	 * @param schemaPattern Database schema name or pattern
++	 * @param tableNamePattern Table name or pattern
++	 * @return Object containing the raw results of the import operation
++	 */
++	public JDBCMetadataImportResult importTables(JDBCConnection connection, String dbName, String schemaPattern, String tableNamePattern);
++
++}
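++// Usage sketch (illustrative only): importing all tables of one schema. The
++// importer instance, connection string, and credentials are hypothetical.
++//
++//   JDBCConnection conn = new JDBCConnection();
++//   conn.setJdbcConnectionString("jdbc:db2://localhost:50000/SAMPLE");
++//   conn.setUser("user");
++//   conn.setPassword("password");
++//   JDBCMetadataImporter importer = ...; // obtained from the ODF framework
++//   JDBCMetadataImportResult result = importer.importTables(conn, "SAMPLE", "MYSCHEMA", "%");
++//   List<String> importedTables = result.getTableNames();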
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java
+new file mode 100755
+index 0000000..3e5cba3
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.importer;
++
++public class MetadataImportException extends RuntimeException {
++
++	private static final long serialVersionUID = -3502239943338011231L;
++
++	public MetadataImportException() {
++		super();
++	}
++
++	public MetadataImportException(String message) {
++		super(message);
++	}
++
++	public MetadataImportException(Throwable cause) {
++		super(cause);
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java
+new file mode 100755
+index 0000000..b25ad8b
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java
+@@ -0,0 +1,61 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++// JSON
++/**
++ * This class represents a result of a discovery service analysis on a single metadata object.
++ * By extending this class, new annotation types can be created for new discovery services in order to provide additional information.
++ *
++ */
++public abstract class Annotation extends MetaDataObject {
++	
++	private String annotationType = this.getClass().getSimpleName().replace('$', '_');
++	private String analysisRun;
++	private String jsonProperties;
++	private String summary;
++
++	public String getAnnotationType() {
++		return annotationType;
++	}
++
++	public void setAnnotationType(String annotationType) {
++		this.annotationType = annotationType;
++	}
++
++	public String getJsonProperties() {
++		return jsonProperties;
++	}
++
++	public void setJsonProperties(String jsonProperties) {
++		this.jsonProperties = jsonProperties;
++	}
++
++	public String getAnalysisRun() {
++		return analysisRun;
++	}
++
++	public void setAnalysisRun(String analysisRun) {
++		this.analysisRun = analysisRun;
++	}
++
++	public String getSummary() {
++		return summary;
++	}
++
++	public void setSummary(String summary) {
++		this.summary = summary;
++	}
++	
++}
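++// Sketch (illustrative only) of a custom annotation type: a discovery service
++// can extend one of the Annotation subclasses to carry additional result
++// fields. The class and field names below are hypothetical.
++//
++//   public class DataQualityScoreAnnotation extends ProfilingAnnotation {
++//       private double qualityScore;
++//       public double getQualityScore() { return qualityScore; }
++//       public void setQualityScore(double qualityScore) { this.qualityScore = qualityScore; }
++//   }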
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java
+new file mode 100755
+index 0000000..cbc801f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java
+@@ -0,0 +1,44 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++public class BusinessTerm extends MetaDataObject {
++	
++	private List<String> abbreviations;
++	private String example;
++	private String usage;
++	
++	public List<String> getAbbreviations() {
++		return abbreviations;
++	}
++	public void setAbbreviations(List<String> abbreviations) {
++		this.abbreviations = abbreviations;
++	}
++	public String getExample() {
++		return example;
++	}
++	public void setExample(String example) {
++		this.example = example;
++	}
++	public String getUsage() {
++		return usage;
++	}
++	public void setUsage(String usage) {
++		this.usage = usage;
++	}
++
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java
+new file mode 100755
+index 0000000..5bbf731
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java
+@@ -0,0 +1,137 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.HashMap;
++import java.util.List;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.InternalMetadataStoreBase;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
++import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++
++/**
++ * In-memory metadata cache to be used by discovery services that do not have access to the metadata store.
++ * The cache uses the same interface as the metadata store but does not support all of its methods.
++ *
++ */
++public class CachedMetadataStore extends InternalMetadataStoreBase implements MetadataStore {
++	private Logger logger = Logger.getLogger(CachedMetadataStore.class.getName());
++	private static final String METADATA_STORE_ID = "ODF_METADATA_CACHE";
++	private static final String STORE_PROPERTY_TYPE = "cache";
++	private static final String STORE_PROPERTY_DESCRIPTION = "ODF metadata cache";
++
++	protected Object accessLock = new Object();
++	private HashMap<String, StoredMetaDataObject> objectStore = new HashMap<String, StoredMetaDataObject>();
++	private HashMap<String, ConnectionInfo> connectionInfoStore = new HashMap<String, ConnectionInfo>();
++
++	public CachedMetadataStore(MetaDataCache metaDataCache) {
++		for (StoredMetaDataObject obj : metaDataCache.getMetaDataObjects()) {
++			getObjects().put(obj.getMetaDataObject().getReference().getId(), obj);
++			logger.log(Level.FINER, "Added object with name ''{0}'' to metadata cache.", obj.getMetaDataObject().getName());
++		}
++		for (ConnectionInfo conInfo : metaDataCache.getConnectionInfoObjects()) {
++			connectionInfoStore.put(conInfo.getAssetReference().getId(), conInfo);
++			logger.log(Level.FINER, "Added connection info object for metadata object id ''{0}'' to metadata cache.", conInfo.getAssetReference().getId());
++		}
++	}
++
++	protected Object getAccessLock() {
++		return accessLock;
++	}
++
++	public static MetaDataCache retrieveMetaDataCache(MetadataStore mds, MetaDataObject metaDataObject) {
++		MetaDataCache cache = new MetaDataCache();
++		populateMetaDataCache(cache, mds, metaDataObject);
++		return cache;
++	}
++	/**
++	 * Internal method that recursively populates the metadata cache with a given metadata object and all of its child objects.
++	 * If a ConnectionInfo object is available for a cached metadata object,
++	 * it will be added to the cache as well.
++	 *  
++	 * @param metaDataCache Metadata cache to be populated
++	 * @param mds Metadata store to retrieve the cached objects from
++	 * @param metaDataObject Given metadata object
++	 */
++	private static void populateMetaDataCache(MetaDataCache metaDataCache, MetadataStore mds, MetaDataObject metaDataObject) {
++		// Add current object
++		StoredMetaDataObject currentObject = new StoredMetaDataObject(metaDataObject);
++		for (String referenceType : mds.getReferenceTypes()) {
++			currentObject.getReferenceMap().put(referenceType, InternalMetaDataUtils.getReferenceList(mds.getReferences(referenceType, metaDataObject)));
++		}
++		metaDataCache.getMetaDataObjects().add(currentObject);
++		ConnectionInfo connectionInfo = mds.getConnectionInfo(metaDataObject);
++
++		// Connection info must be cached as well because it cannot be retrieved dynamically, as required parent objects might be missing from the cache
++		if (connectionInfo != null) {
++			metaDataCache.getConnectionInfoObjects().add(connectionInfo);
++		}
++
++		// Add child objects
++		for (MetaDataObject child : mds.getChildren(metaDataObject)) {
++			populateMetaDataCache(metaDataCache, mds, child);
++		}
++	}
++
++	protected HashMap<String, StoredMetaDataObject> getObjects() {
++		return objectStore;
++	}
++
++	@Override
++	public Properties getProperties() {
++		Properties props = new Properties();
++		props.put(MetadataStore.STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_DESCRIPTION);
++		props.put(MetadataStore.STORE_PROPERTY_TYPE, STORE_PROPERTY_TYPE);
++		props.put(STORE_PROPERTY_ID, METADATA_STORE_ID);
++		return props;
++	}
++
++	@Override
++	public void resetAllData() {
++		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
++	}
++
++	@Override
++	public String getRepositoryId() {
++		return METADATA_STORE_ID;
++	}
++
++	@Override
++	public ConnectionInfo getConnectionInfo(MetaDataObject metaDataObject) {
++		return connectionInfoStore.get(metaDataObject.getReference().getId());
++	}
++
++	@Override
++	public List<MetaDataObjectReference> search(String query) {
++		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
++	}
++
++	@Override
++	public void createSampleData() {
++		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
++	}
++
++	@Override
++	public AnnotationPropagator getAnnotationPropagator() {
++		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
++	}
++
++}
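++// Usage sketch (illustrative only): snapshotting a metadata subtree into a
++// cache and reading it back through the MetadataStore interface without a
++// live store. "mds" and "table" are hypothetical.
++//
++//   MetaDataCache cache = CachedMetadataStore.retrieveMetaDataCache(mds, table);
++//   MetadataStore offlineStore = new CachedMetadataStore(cache);
++//   ConnectionInfo info = offlineStore.getConnectionInfo(table);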
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java
+new file mode 100755
+index 0000000..8db6ec1
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java
+@@ -0,0 +1,38 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++public class ClassificationAnnotation extends Annotation {
++	
++	private MetaDataObjectReference classifiedObject;
++	private List<MetaDataObjectReference> classifyingObjects;
++
++	public MetaDataObjectReference getClassifiedObject() {
++		return classifiedObject;
++	}
++	public void setClassifiedObject(MetaDataObjectReference classifiedObject) {
++		this.classifiedObject = classifiedObject;
++	}
++	
++	public List<MetaDataObjectReference> getClassifyingObjects() {
++		return classifyingObjects;
++	}
++	public void setClassifyingObjects(List<MetaDataObjectReference> classifyingObjects) {
++		this.classifyingObjects = classifyingObjects;
++	}
++}
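++// Usage sketch (illustrative only): classifying a column with a business term.
++// "columnRef" and "termRef" are hypothetical object references.
++//
++//   ClassificationAnnotation annotation = new ClassificationAnnotation();
++//   annotation.setClassifiedObject(columnRef);
++//   annotation.setClassifyingObjects(Collections.singletonList(termRef));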
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java
+new file mode 100755
+index 0000000..0ae4370
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * This class represents metadata of a column in a table
++ *
++ */
++public class Column extends MetaDataObject {
++
++	private String dataType;
++
++	public String getDataType() {
++		return dataType;
++	}
++
++	public void setDataType(String dataType) {
++		this.dataType = dataType;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java
+new file mode 100755
+index 0000000..9a42fc2
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java
+@@ -0,0 +1,18 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++public abstract class Connection extends MetaDataObject {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java
+new file mode 100755
+index 0000000..4884105
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java
+@@ -0,0 +1,64 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * 
++ * General connection info that must be extended for individual data sources.
++ *
++ */
++@ApiModel(description="Object containing the information required in order to access the data behind a specific metadata object.")
++public abstract class ConnectionInfo {
++
++	@ApiModelProperty(value="Available connections for accessing the data behind the metadata object", readOnly=true, required=true)
++	private List<Connection> connections;
++
++	@ApiModelProperty(value="Reference to the actual metadata object", readOnly=true, required=true)
++	private MetaDataObjectReference assetReference;
++
++	@ApiModelProperty(value="Java class representing the connection info object", hidden=true)
++	private String javaClass = this.getClass().getName(); // don't use JsonTypeInfo 
++
++	public List<Connection> getConnections() {
++		return this.connections;
++	}
++
++	public void setConnections(List<Connection> connections) {
++		this.connections = connections;
++	}
++
++	public MetaDataObjectReference getAssetReference() {
++		return this.assetReference;
++	}
++
++	public void setAssetReference(MetaDataObjectReference assetReference) {
++		this.assetReference = assetReference;
++	}
++
++	public String getJavaClass() {
++		return javaClass;
++	}
++
++	public void setJavaClass(String javaClass) {
++		this.javaClass = javaClass;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java
+new file mode 100755
+index 0000000..2a1ad7f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * This class is a metadata object for a CSV file located at a specific URL
++ *
++ */
++public class DataFile extends RelationalDataSet {
++	private String encoding = "UTF-8";
++	private String urlString;
++
++	public String getUrlString() {
++		return urlString;
++	}
++
++	public void setUrlString(String url) {
++		this.urlString = url;
++	}
++
++	public String getEncoding() {
++		return encoding;
++	}
++
++	public void setEncoding(String encoding) {
++		this.encoding = encoding;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java
+new file mode 100755
+index 0000000..5e2a132
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java
+@@ -0,0 +1,18 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++public class DataFileFolder extends MetaDataObject {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java
+new file mode 100755
+index 0000000..0b75eae
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java
+@@ -0,0 +1,22 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import io.swagger.annotations.ApiModel;
++
++// JSON
++@ApiModel(description="Metadata object representing a generic data set.")
++public abstract class DataSet extends MetaDataObject {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java
+new file mode 100755
+index 0000000..f4a11bd
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java
+@@ -0,0 +1,35 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++/**
++ * This class represents a metadata object for a data store that references its available connections
++ *
++ */
++public abstract class DataStore extends MetaDataObject {
++	private List<MetaDataObjectReference> connections;
++
++	public List<MetaDataObjectReference> getConnections() {
++		return connections;
++	}
++
++	public void setConnections(List<MetaDataObjectReference> connections) {
++		this.connections = connections;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java
+new file mode 100755
+index 0000000..bbf7a0a
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java
+@@ -0,0 +1,31 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * This class is a metadata object for a relational database
++ *
++ */
++public class Database extends DataStore {
++	private String dbType;
++
++	public String getDbType() {
++		return dbType;
++	}
++
++	public void setDbType(String dbType) {
++		this.dbType = dbType;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java
+new file mode 100755
+index 0000000..ed6c0ef
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java
+@@ -0,0 +1,41 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * 
++ * This class represents a text document stored at a URL
++ *
++ */
++public class Document extends DataSet {
++	private String encoding = "UTF-8";
++	private String urlString;
++
++	public String getUrlString() {
++		return urlString;
++	}
++
++	public void setUrlString(String url) {
++		this.urlString = url;
++	}
++
++	public String getEncoding() {
++		return encoding;
++	}
++
++	public void setEncoding(String encoding) {
++		this.encoding = encoding;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java
+new file mode 100755
+index 0000000..0a96fb1
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java
+@@ -0,0 +1,49 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * This class represents a JDBC connection that can be used to access the contents of a data set
++ *
++ */
++public class JDBCConnection extends Connection {
++	private String jdbcConnectionString;
++	private String user;
++	private String password;
++
++	public String getJdbcConnectionString() {
++		return jdbcConnectionString;
++	}
++
++	public void setJdbcConnectionString(String jdbcConnectionString) {
++		this.jdbcConnectionString = jdbcConnectionString;
++	}
++
++	public String getUser() {
++		return user;
++	}
++
++	public void setUser(String user) {
++		this.user = user;
++	}
++
++	public String getPassword() {
++		return password;
++	}
++
++	public void setPassword(String password) {
++		this.password = password;
++	}
++
++}
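++// Usage sketch (illustrative only): a consumer of this object might open a
++// standard java.sql connection from its fields. "odfConn" is hypothetical.
++//
++//   java.sql.Connection sqlConn = java.sql.DriverManager.getConnection(
++//       odfConn.getJdbcConnectionString(), odfConn.getUser(), odfConn.getPassword());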
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java
+new file mode 100755
+index 0000000..131f3f9
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java
+@@ -0,0 +1,49 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * 
++ * Provides all information for connecting to a JDBC data source
++ *
++ */
++@ApiModel(description="Object containing the information required to access a specific JDBC table.")
++public class JDBCConnectionInfo extends ConnectionInfo {
++
++	@ApiModelProperty(value="Table name", readOnly=true, required=true)
++	private String tableName;
++
++	@ApiModelProperty(value="Schema name", readOnly=true, required=true)
++	private String schemaName;
++
++	public String getTableName() {
++		return this.tableName;
++	}
++
++	public void setTableName(String tableName) {
++		this.tableName = tableName;
++	}
++
++	public String getSchemaName() {
++		return this.schemaName;
++	}
++
++	public void setSchemaName(String schemaName) {
++		this.schemaName = schemaName;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java
+new file mode 100755
+index 0000000..ff2f47f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java
+@@ -0,0 +1,47 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++
++import io.swagger.annotations.ApiModelProperty;
++
++public class MetaDataCache {
++
++	@ApiModelProperty(value="Cached metadata objects to be used by discovery services if access to the metadata store is not available", required=false)
++	private List<StoredMetaDataObject> metaDataObjects = new ArrayList<StoredMetaDataObject>();
++
++	@ApiModelProperty(value="Cached connection info objects to be used by discovery services if access to the metadata store is not available", required=false)
++	private List<ConnectionInfo> connectionInfoObjects = new ArrayList<ConnectionInfo>();
++
++	public List<StoredMetaDataObject> getMetaDataObjects() {
++		return metaDataObjects;
++	}
++
++	public void setMetaDataObjects(List<StoredMetaDataObject> metaDataObjects) {
++		this.metaDataObjects = metaDataObjects;
++	}
++
++	public List<ConnectionInfo> getConnectionInfoObjects() {
++		return this.connectionInfoObjects;
++	}
++
++	public void setConnectionInfoObjects(List<ConnectionInfo> connectionInfoObjects) {
++		this.connectionInfoObjects = connectionInfoObjects;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java
+new file mode 100755
+index 0000000..6152d51
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java
+@@ -0,0 +1,96 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++// JSON
++/**
++ * 
++ * A MetaDataObject is an object in a metadata store. It contains a reference describing its location as well as the annotations that were created on this object.
++ *
++ */
++@ApiModel(description="Generic metadata object.")
++public abstract class MetaDataObject {
++
++	@ApiModelProperty(value="Reference to the object (generated)", readOnly=true, required=true)
++	private MetaDataObjectReference reference;
++
++	@ApiModelProperty(value="Description of the object", required=false)
++	private String description;
++
++	@ApiModelProperty(value="Name of the object", required=true)
++	private String name;
++
++	@ApiModelProperty(value="Java class representing the object", hidden=true)
++	private String javaClass = this.getClass().getName(); // don't use JsonTypeInfo 
++	
++	private String originRef;
++	
++	private List<String> replicaRefs;
++
++	public String getDescription() {
++		return description;
++	}
++
++	public void setDescription(String description) {
++		this.description = description;
++	}
++
++	public String getName() {
++		return name;
++	}
++
++	public void setName(String name) {
++		this.name = name;
++	}
++
++	public MetaDataObjectReference getReference() {
++		return reference;
++	}
++
++	public void setReference(MetaDataObjectReference reference) {
++		this.reference = reference;
++	}
++
++	public String getJavaClass() {
++		return javaClass;
++	}
++
++	public void setJavaClass(String javaClass) {
++		this.javaClass = javaClass;
++	}
++
++	public String getOriginRef() {
++		return originRef;
++	}
++
++	public void setOriginRef(String originRef) {
++		this.originRef = originRef;
++	}
++
++	public List<String> getReplicaRefs() {
++		return replicaRefs;
++	}
++
++	public void setReplicaRefs(List<String> replicaRefs) {
++		this.replicaRefs = replicaRefs;
++	}
++	
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java
+new file mode 100755
+index 0000000..8e6fcca
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java
+@@ -0,0 +1,28 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++public class ProfilingAnnotation extends Annotation {
++
++	private MetaDataObjectReference profiledObject;
++
++	public MetaDataObjectReference getProfiledObject() {
++		return profiledObject;
++	}
++	public void setProfiledObject(MetaDataObjectReference profiledObject) {
++		this.profiledObject = profiledObject;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java
+new file mode 100755
+index 0000000..e8656c5
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java
+@@ -0,0 +1,24 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++
++/**
++ * 
++ * This class represents a metadata object that contains columns, e.g. a database table or a CSV file
++ *
++ */
++public abstract class RelationalDataSet extends DataSet {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java
+new file mode 100755
+index 0000000..924dadf
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java
+@@ -0,0 +1,31 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++public class RelationshipAnnotation extends Annotation {
++	
++	private List<MetaDataObjectReference> relatedObjects;
++
++	public List<MetaDataObjectReference> getRelatedObjects() {
++		return relatedObjects;
++	}
++
++	public void setRelatedObjects(List<MetaDataObjectReference> relatedObjects) {
++		this.relatedObjects = relatedObjects;
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java
+new file mode 100755
+index 0000000..e74bda4
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java
+@@ -0,0 +1,18 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++public class Schema extends MetaDataObject {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java
+new file mode 100755
+index 0000000..633706d
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java
+@@ -0,0 +1,23 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++/**
++ * 
++ * This class represents a database table metadata object
++ *
++ */
++public class Table extends RelationalDataSet {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java
+new file mode 100755
+index 0000000..3a1a968
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java
+@@ -0,0 +1,21 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownConnection extends Connection {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java
+new file mode 100755
+index 0000000..57da8d3
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java
+@@ -0,0 +1,21 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownConnectionInfo extends ConnectionInfo {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java
+new file mode 100755
+index 0000000..948bf3f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java
+@@ -0,0 +1,22 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownDataSet extends DataSet {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java
+new file mode 100755
+index 0000000..d07537e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java
+@@ -0,0 +1,21 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownDataStore extends DataStore {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java
+new file mode 100755
+index 0000000..6993751
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java
+@@ -0,0 +1,21 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.metadata.models;
++
++import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
++
++@JsonIgnoreProperties(ignoreUnknown = true)
++public class UnknownRelationalDataSet extends RelationalDataSet {
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java
+new file mode 100755
+index 0000000..08ca741
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java
+@@ -0,0 +1,74 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import org.apache.atlas.odf.api.settings.validation.EnumValidator;
++import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++/*
++ * This class is final, because reflection is used to access getters / setters in order to merge. This doesn't work with inherited methods
++ */
++public final class KafkaConsumerConfig {
++	/*
++	 * ############ !!!!!!!!!!!!!!!!!!! ###################
++	 * 
++	 * Because of a jackson defect, JsonProperty annotations must be on all properties AND their getters and setters!
++	 * 
++	 * https://github.com/FasterXML/jackson-module-scala/issues/197
++	 */
++
++	private String offsetsStorage;
++
++	private Long zookeeperSessionTimeoutMs;
++
++	private Long zookeeperConnectionTimeoutMs;
++
++	public String getOffsetsStorage() {
++		return offsetsStorage;
++	}
++
++	public void setOffsetsStorage(String offsetsStorage) {
++		this.offsetsStorage = offsetsStorage;
++	}
++
++	public Long getZookeeperSessionTimeoutMs() {
++		return zookeeperSessionTimeoutMs;
++	}
++
++	public void setZookeeperSessionTimeoutMs(Long zookeeperSessionTimeoutMs) {
++		this.zookeeperSessionTimeoutMs = zookeeperSessionTimeoutMs;
++	}
++
++	public Long getZookeeperConnectionTimeoutMs() {
++		return zookeeperConnectionTimeoutMs;
++	}
++
++	public void setZookeeperConnectionTimeoutMs(Long zookeeperConnectionTimeoutMs) {
++		this.zookeeperConnectionTimeoutMs = zookeeperConnectionTimeoutMs;
++	}
++
++	public void validate() throws ValidationException {
++		if (getOffsetsStorage() != null) {
++			new EnumValidator("kafka", "zookeeper").validate("KafkaConsumerConfig.offsetsStorage", this.offsetsStorage);
++		}
++		if (getZookeeperConnectionTimeoutMs() != null) {
++			new NumberPositiveValidator().validate("KafkaConsumerConfig.zookeeperConnectionTimeoutMs", this.zookeeperConnectionTimeoutMs);
++		}
++		if (getZookeeperSessionTimeoutMs() != null) {
++			new NumberPositiveValidator().validate("KafkaConsumerConfig.zookeeperSessionTimeoutMs", this.zookeeperSessionTimeoutMs);
++		}
++	}
++
++}
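++
++/* Usage sketch (illustrative only): all fields are optional and each is validated
++ * individually, so a partially populated config can pass validation.
++ *
++ *   KafkaConsumerConfig config = new KafkaConsumerConfig();
++ *   config.setOffsetsStorage("kafka");          // must be "kafka" or "zookeeper"
++ *   config.setZookeeperSessionTimeoutMs(3000L); // must be positive
++ *   try {
++ *       config.validate();
++ *   } catch (ValidationException e) {
++ *       System.err.println(e.getMessage());
++ *   }
++ */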
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java
+new file mode 100755
+index 0000000..3c1725e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java
+@@ -0,0 +1,63 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++public class KafkaMessagingConfiguration extends MessagingConfiguration {
++	private KafkaConsumerConfig kafkaConsumerConfig;
++
++	private Integer queueConsumerWaitMs;
++
++	private Integer kafkaBrokerTopicReplication;
++
++	public Integer getKafkaBrokerTopicReplication() {
++		return kafkaBrokerTopicReplication;
++	}
++
++	public void setKafkaBrokerTopicReplication(Integer kafkaBrokerTopicReplication) {
++		this.kafkaBrokerTopicReplication = kafkaBrokerTopicReplication;
++	}
++
++	public Integer getQueueConsumerWaitMs() {
++		return queueConsumerWaitMs;
++	}
++
++	public void setQueueConsumerWaitMs(Integer queueConsumerWaitMs) {
++		this.queueConsumerWaitMs = queueConsumerWaitMs;
++	}
++
++	public KafkaConsumerConfig getKafkaConsumerConfig() {
++		return kafkaConsumerConfig;
++	}
++
++	public void setKafkaConsumerConfig(KafkaConsumerConfig kafkaConsumerConfig) {
++		this.kafkaConsumerConfig = kafkaConsumerConfig;
++	}
++
++	public void validate() throws ValidationException {
++		super.validate();
++		if (this.getQueueConsumerWaitMs() != null) {
++			new NumberPositiveValidator().validate("ODFConfig.queueConsumerWaitMs", this.queueConsumerWaitMs);
++		}
++		if (this.getKafkaBrokerTopicReplication() != null) {
++			new NumberPositiveValidator().validate("ODFConfig.kafkaBrokerTopicReplication", this.kafkaBrokerTopicReplication);
++		}
++		if (this.getKafkaConsumerConfig() != null) {
++			this.kafkaConsumerConfig.validate();
++		}
++
++	}
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java
+new file mode 100755
+index 0000000..ba006e3
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import com.fasterxml.jackson.annotation.JsonTypeInfo;
++import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++@ApiModel(description="Messaging configuration to be used for queuing requests.")
++@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "type")
++public abstract class MessagingConfiguration {
++	@ApiModelProperty(value="Time analysis requests are retained on the queue in milliseconds")
++	private Long analysisRequestRetentionMs;
++	
++	public Long getAnalysisRequestRetentionMs() {
++		return analysisRequestRetentionMs;
++	}
++
++	public void setAnalysisRequestRetentionMs(Long analysisRequestRetentionMs) {
++		this.analysisRequestRetentionMs = analysisRequestRetentionMs;
++	}
++	
++	public void validate() throws ValidationException {
++		if (this.getAnalysisRequestRetentionMs() != null) {
++			new NumberPositiveValidator().validate("ODFConfig.analysisRequestRetentionMs", this.analysisRequestRetentionMs);
++		}
++	}
++}
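++
++/* Serialization sketch (illustrative values): because of @JsonTypeInfo with
++ * Id.CLASS, Jackson embeds the concrete class name in a "type" property, e.g.
++ *
++ *   {
++ *     "type": "org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration",
++ *     "analysisRequestRetentionMs": 86400000
++ *   }
++ *
++ * which allows the abstract MessagingConfiguration to be deserialized polymorphically.
++ */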
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java
+new file mode 100755
+index 0000000..2124c54
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java
+@@ -0,0 +1,206 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import java.util.Map;
++import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
++import org.apache.atlas.odf.api.settings.validation.StringNotEmptyValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/*
++ * 
++ * This class is final, because reflection is used to access getters / setters in order to merge. This doesn't work with inherited methods
++ * Boolean properties must be of type Boolean instead of boolean in order to support null values which are required for merging later!
++ *
++ */
++@ApiModel(description="General ODF settings.")
++public final class ODFSettings {
++
++	/*
++	 * ############ !!!!!!!!!!!!!!!!!!! ####################
++	 *
++	 * Because of a jackson defect, JsonProperty annotations must be on all properties AND their getters and setters!
++	 *
++	 * https://github.com/FasterXML/jackson-module-scala/issues/197
++	 */
++
++	@ApiModelProperty(value="Polling interval for checking whether a discovery service is still running")
++	private Integer discoveryServiceWatcherWaitMs;
++
++	@ApiModelProperty(value="Unique id of the ODF instance")
++	private String instanceId;
++
++	@ApiModelProperty(value="ODF URL passed to discovery services for remote access to the metadata API")
++	private String odfUrl;
++
++	@ApiModelProperty(value="ODF user id passed to discovery services for remote access to the metadata API")
++	private String odfUser;
++
++	@ApiModelProperty(value="ODF password passed to discovery services for remote access to the metadata API")
++	private String odfPassword;
++
++	@ApiModelProperty(value = "ATLAS setting indicating if events regarding newly imported data sets should be consumed from me")
++	private Boolean consumeMessageHubEvents;
++
++	@ApiModelProperty(value = "ATLAS Messagehub VCAP_SERVICES value from Bluemix,  e.g { \"messagehub\": [{\"name\" : \"...\",\n\"credentials\": {...}]")
++	private String atlasMessagehubVcap;
++
++	@ApiModelProperty(value="Indicates whether to reuse equivalent analysis requests that may be already queued rather that running the same analysis again")
++	private Boolean reuseRequests;
++
++	@ApiModelProperty(value="Messaging configuration to be used for queuing requests")
++	private MessagingConfiguration messagingConfiguration;
++
++	@ApiModelProperty(value="If set to true, ALL registered discovery services will be automatically issued when a new data set is imported")
++	private Boolean runAnalysisOnImport;
++
++	@ApiModelProperty(value="If set to true, ALL data sets will be automatically analyzed whenever a new discovery service is registered")
++	private Boolean runNewServicesOnRegistration;
++
++	@ApiModelProperty(value="User-defined configuration options for discovery services", required=true)
++	private Map<String, Object> userDefined;
++
++	@ApiModelProperty(value="Spark clusters to be used for running discovery services", required=true)
++	private SparkConfig sparkConfig;
++
++	@ApiModelProperty(value = "Set to true to propagate the created annotations for each analysis request to the metadata store")
++	private Boolean enableAnnotationPropagation;
++
++	public Boolean getEnableAnnotationPropagation() {
++		return enableAnnotationPropagation;
++	}
++
++	public void setEnableAnnotationPropagation(Boolean enableAnnotationPropagation) {
++		this.enableAnnotationPropagation = enableAnnotationPropagation;
++	}
++
++	public Boolean isReuseRequests() {
++		return reuseRequests;
++	}
++
++	public void setReuseRequests(Boolean reuseRequests) {
++		this.reuseRequests = reuseRequests;
++	}
++
++	public String getInstanceId() {
++		return this.instanceId;
++	}
++
++	public void setInstanceId(String instanceId) {
++		this.instanceId = instanceId;
++	}
++
++	public String getOdfUrl() {
++		return this.odfUrl;
++	}
++
++	public void setOdfUrl(String odfUrl) {
++		this.odfUrl = odfUrl;
++	}
++
++	public String getOdfUser() {
++		return this.odfUser;
++	}
++
++	public void setOdfUser(String odfUser) {
++		this.odfUser = odfUser;
++	}
++
++	public String getOdfPassword() {
++		return this.odfPassword;
++	}
++
++	public void setOdfPassword(String odfPassword) {
++		this.odfPassword = odfPassword;
++	}
++
++	public Integer getDiscoveryServiceWatcherWaitMs() {
++		return discoveryServiceWatcherWaitMs;
++	}
++
++	public void setDiscoveryServiceWatcherWaitMs(Integer discoveryServiceWatcherWaitMs) {
++		this.discoveryServiceWatcherWaitMs = discoveryServiceWatcherWaitMs;
++	}
++
++	public Boolean getRunAnalysisOnImport() {
++		return runAnalysisOnImport;
++	}
++
++	public void setRunAnalysisOnImport(Boolean runAnalysisOnImport) {
++		this.runAnalysisOnImport = runAnalysisOnImport;
++	}
++
++	public Boolean getRunNewServicesOnRegistration() {
++		return runNewServicesOnRegistration;
++	}
++
++	public void setRunNewServicesOnRegistration(Boolean runNewServicesOnRegistration) {
++		this.runNewServicesOnRegistration = runNewServicesOnRegistration;
++	}
++
++	public MessagingConfiguration getMessagingConfiguration() {
++		return messagingConfiguration;
++	}
++
++	public void setMessagingConfiguration(MessagingConfiguration messagingConfiguration) {
++		this.messagingConfiguration = messagingConfiguration;
++	}
++
++	public String getAtlasMessagehubVcap() {
++		return atlasMessagehubVcap;
++	}
++
++	public void setAtlasMessagehubVcap(String atlasMessagehubVcap) {
++		this.atlasMessagehubVcap = atlasMessagehubVcap;
++	}
++
++	public Map<String, Object> getUserDefined() {
++		return userDefined;
++	}
++
++	public Boolean getConsumeMessageHubEvents() {
++		return consumeMessageHubEvents;
++	}
++
++	public void setConsumeMessageHubEvents(Boolean consumeMessageHubEvents) {
++		this.consumeMessageHubEvents = consumeMessageHubEvents;
++	}
++
++	public void setUserDefined(Map<String, Object> userDefined) {
++		this.userDefined = userDefined;
++	}
++
++	public SparkConfig getSparkConfig() {
++		return sparkConfig;
++	}
++
++	public void setSparkConfig(SparkConfig sparkConfig) {
++		this.sparkConfig = sparkConfig;
++	}
++
++	public void validate() throws ValidationException {
++		new StringNotEmptyValidator().validate("ODFConfig.instanceId", instanceId);
++
++		if (this.getDiscoveryServiceWatcherWaitMs() != null) {
++			new NumberPositiveValidator().validate("ODFConfig.discoveryServiceWatcherWaitMs", this.discoveryServiceWatcherWaitMs);
++		}
++
++		if (this.messagingConfiguration != null) {
++			this.messagingConfiguration.validate();
++		}
++	}
++}
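++
++/* Sketch of the annotation pattern the class comment above calls for (illustrative
++ * only; the fields in this class do not yet carry the annotations): to work around
++ * the Jackson defect, @JsonProperty would be repeated on the field, the getter,
++ * and the setter:
++ *
++ *   @JsonProperty("odfUrl")
++ *   private String odfUrl;
++ *
++ *   @JsonProperty("odfUrl")
++ *   public String getOdfUrl() { return odfUrl; }
++ *
++ *   @JsonProperty("odfUrl")
++ *   public void setOdfUrl(String odfUrl) { this.odfUrl = odfUrl; }
++ */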
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java
+new file mode 100755
+index 0000000..9c300b9
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java
+@@ -0,0 +1,79 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import java.util.Map;
++import java.util.Properties;
++
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++/**
++*
++* External Java API for reading and updating ODF settings
++*
++*/
++public interface SettingsManager {
++
++	/**
++	 * Retrieve Kafka consumer properties
++	 * @return Current Kafka consumer properties
++	 */
++	public Properties getKafkaConsumerProperties();
++
++	/**
++	 * Retrieve Kafka producer properties
++	 * @return Current Kafka producer properties
++	 */
++	public Properties getKafkaProducerProperties();
++
++	/**
++	 * Retrieve overall ODF settings including plain passwords
++	 * @return Current ODF settings
++	 */
++	public ODFSettings getODFSettings();
++
++	/**
++	 * Retrieve overall ODF settings with hidden passwords
++	 * @return Current ODF settings
++	 */
++	public ODFSettings getODFSettingsHidePasswords();
++
++	/**
++	 * Update ODF settings
++	 * 
++	 * Passwords provided as plain text will be encrypted. If HIDDEN_PASSWORD_IDENTIFIER
++	 * is provided instead of a password, the stored password will remain unchanged.
++	 * 
++	 * @param update Updated ODF settings
++	 */
++	public void updateODFSettings(ODFSettings update) throws ValidationException;
++
++	/**
++	 * Reset ODF settings to the defaults
++	 */
++	public void resetODFSettings();
++
++	/**
++	 * Retrieve user defined ODF properties
++	 * @return Map of user defined ODF properties
++	 */
++	public Map<String, Object> getUserDefinedConfig();
++
++	/**
++	 * Update user defined ODF properties
++	 * @param update Map of user defined ODF properties
++	 * @throws ValidationException
++	 */
++	public void updateUserDefined(Map<String, Object> update) throws ValidationException;
++}
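++
++/* Usage sketch (illustrative only; the getSettingsManager() accessor on the ODF
++ * facade is an assumption of this example):
++ *
++ *   SettingsManager settings = new ODFFactory().create().getSettingsManager();
++ *   ODFSettings current = settings.getODFSettings();
++ *   current.setReuseRequests(Boolean.TRUE);
++ *   settings.updateODFSettings(current);   // throws ValidationException on bad values
++ */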
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java
+new file mode 100755
+index 0000000..5c3694c
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java
+@@ -0,0 +1,52 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings;
++
++import java.util.HashMap;
++import java.util.Map;
++
++import com.fasterxml.jackson.annotation.JsonAnyGetter;
++import com.fasterxml.jackson.annotation.JsonAnySetter;
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++//JSON
++@ApiModel(description = "Configuration of Spark cluster.")
++public class SparkConfig {
++
++	@ApiModelProperty(value = "Master URL of the Spark cluster", required = true)
++	private String clusterMasterUrl = null;
++
++	@ApiModelProperty(value="Custom Spark configuration options", required=false)
++	private Map<String, Object> configs = new HashMap<>();
++
++	public String getClusterMasterUrl() {
++		return this.clusterMasterUrl;
++	}
++
++	public void setClusterMasterUrl(String clusterMasterUrl) {
++		this.clusterMasterUrl = clusterMasterUrl;
++	}
++
++	@JsonAnyGetter
++	public Map<String, Object> getConfigs() {
++		return this.configs;
++	}
++
++	@JsonAnySetter
++	public void setConfig(String name, Object value) {
++		this.configs.put(name, value);
++	}
++
++}
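++
++/* Deserialization sketch (illustrative values): thanks to @JsonAnySetter, every
++ * JSON property other than clusterMasterUrl is collected into the configs map, so
++ *
++ *   { "clusterMasterUrl": "spark://master:7077", "spark.executor.memory": "2g" }
++ *
++ * yields getClusterMasterUrl() == "spark://master:7077" and a configs map
++ * containing the entry "spark.executor.memory" -> "2g"; @JsonAnyGetter flattens
++ * the map back into top-level properties on serialization.
++ */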
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java
+new file mode 100755
+index 0000000..c6c365f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java
+@@ -0,0 +1,37 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++import java.text.MessageFormat;
++
++public class EnumValidator implements PropertyValidator {
++
++	String[] validValues = new String[0];
++
++	public EnumValidator(String... validValues) {
++		this.validValues = validValues;
++	}
++
++	@Override
++	public void validate(String property, Object value) throws ValidationException {
++		for (String valid : validValues) {
++			if (valid.equals(value)) {
++				return;
++			}
++		}
++
++		throw new ValidationException(property, MessageFormat.format("only the following values are allowed: ", validValues.toString()));
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java
+new file mode 100755
+index 0000000..ad2662e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++import java.text.MessageFormat;
++
++public class ImplementationValidator implements PropertyValidator {
++
++	public void validate(String property, Object value) throws ValidationException {
++		Class<?> implClass;
++		try {
++			implClass = this.getClass().getClassLoader().loadClass(String.valueOf(value));
++			Object o = implClass.newInstance();
++			// Touch the new instance to make sure it is actually usable.
++			o.toString();
++			return;
++		} catch (ClassNotFoundException e) {
++			e.printStackTrace();
++			throw new ValidationException(property, MessageFormat.format("Class {0} could not be found!", value));
++		} catch (IllegalAccessException e) {
++			e.printStackTrace();
++			throw new ValidationException(property, MessageFormat.format("Class {0} could not be accessed!", value));
++		} catch (InstantiationException e) {
++			e.printStackTrace();
++			throw new ValidationException(property, MessageFormat.format("Class {0} could not be instantiated!", value));
++		} catch (NoClassDefFoundError e) {
++			e.printStackTrace();
++			throw new ValidationException(property, MessageFormat.format("Class defintiion {0} could not be found!", value));
++		}
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java
+new file mode 100755
+index 0000000..3c09f07
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++public class NumberPositiveValidator implements PropertyValidator {
++
++	public void validate(String property, Object value) throws ValidationException {
++		if (!(value instanceof Number)) {
++			throw new ValidationException("Only numbers are allowed!");
++		} else {
++			if (value instanceof Long && (long) value < 0) {
++				throw new ValidationException(property, "Only positive values are allowed!");
++			} else if (value instanceof Integer && (int) value < 0) {
++				throw new ValidationException(property, "Only positive values are allowed!");
++			} else if (value instanceof Double && (double) value < 0) {
++				throw new ValidationException(property, "Only positive values are allowed!");
++			}
++			// Note: zero and other Number subtypes (e.g. Float) pass validation;
++			// only strictly negative Long, Integer, and Double values are rejected.
++		}
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java
+new file mode 100755
+index 0000000..6acb902
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java
+@@ -0,0 +1,20 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++public interface PropertyValidator {
++
++	public void validate(String property, Object value) throws ValidationException;
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java
+new file mode 100755
+index 0000000..c12cf59
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java
+@@ -0,0 +1,27 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++import java.text.MessageFormat;
++
++public class StringNotEmptyValidator implements PropertyValidator {
++
++	@Override
++	public void validate(String property, Object value) throws ValidationException {
++		// The property is required, so null is rejected just like an empty string.
++		if (value == null || value.toString().trim().isEmpty()) {
++			throw new ValidationException(MessageFormat.format("The property {0} is required and cannot be empty", property));
++		}
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java
+new file mode 100755
+index 0000000..b560e9f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java
+@@ -0,0 +1,63 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.settings.validation;
++
++import java.text.MessageFormat;
++
++public class ValidationException extends Exception {
++
++	private static final long serialVersionUID = 485240669635915916L;
++	private String property;
++	private String errorCause;
++
++	public ValidationException(String property, String errorMessage) {
++		this.errorCause = errorMessage;
++		this.property = property;
++	}
++
++	public ValidationException(String errorCause) {
++		this.errorCause = errorCause;
++	}
++
++	@Override
++	public String getMessage() {
++		if (property != null && errorCause != null) {
++			return MessageFormat.format("Error setting property {0}, {1}", property, errorCause);
++		} else if (errorCause != null) {
++			return MessageFormat.format("Error setting property value, {0}", errorCause);
++		} else {
++			return "Error setting property value.";
++		}
++	}
++
++	public String getProperty() {
++		return property;
++	}
++
++	public void setProperty(String property) {
++		this.property = property;
++	}
++
++	public String getErrorCause() {
++		return errorCause;
++	}
++
++	public void setErrorCause(String error) {
++		this.errorCause = error;
++	}
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java
+new file mode 100755
+index 0000000..cc218fb
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java
+@@ -0,0 +1,34 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.spark;
++
++import org.apache.spark.sql.SparkSession;
++
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++
++/**
++ * Interface to be implemented by generic Spark discovery services.
++ * 
++ *
++ */
++
++public interface SparkDiscoveryService extends SyncDiscoveryService {
++
++    /**
++     * Sets the Spark session to be used for processing the discovery service.
++     *
++     * @param spark Current Spark session
++     */
++	void setSparkSession(SparkSession spark);
++
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java
+new file mode 100755
+index 0000000..28c7831
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java
+@@ -0,0 +1,34 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.spark;
++
++import org.apache.spark.sql.SparkSession;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
++
++public abstract class SparkDiscoveryServiceBase extends SyncDiscoveryServiceBase implements SparkDiscoveryService  {
++	protected SparkSession spark;
++	protected MetadataStore mds;
++
++	@Override
++	public void setSparkSession(SparkSession spark) {
++		this.spark = spark;
++	}
++
++	@Override
++	public void setMetadataStore(MetadataStore mds) {
++		this.mds = mds;
++	}
++}
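++
++/* Extension sketch (illustrative only; the service name and the exact signature of
++ * runAnalysis() on SyncDiscoveryService are assumptions of this example): a concrete
++ * Spark discovery service inherits the spark session and mds metadata store and only
++ * implements the analysis itself, e.g.
++ *
++ *   public class MySparkService extends SparkDiscoveryServiceBase {
++ *       @Override
++ *       public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++ *           // use this.spark and this.mds, e.g. via SparkUtils.createDataFrame(...)
++ *           ...
++ *       }
++ *   }
++ */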
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java
+new file mode 100755
+index 0000000..d443303
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.spark;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++/**
++ * Internal interface to be used for processing Spark discovery services.
++ * 
++ *
++ */
++
++public interface SparkServiceExecutor {
++
++	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request);
++
++	DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer);
++}
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java
+new file mode 100755
+index 0000000..50cb09f
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java
+@@ -0,0 +1,308 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.spark;
++
++import java.sql.DriverManager;
++import java.sql.SQLException;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.spark.sql.Dataset;
++import org.apache.spark.sql.Row;
++import org.apache.spark.sql.RowFactory;
++import org.apache.spark.sql.SparkSession;
++import org.apache.spark.sql.types.DataType;
++import org.apache.spark.sql.types.DataTypes;
++import org.apache.spark.sql.types.StructField;
++import org.apache.spark.sql.types.StructType;
++import org.apache.spark.util.Utils;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++
++/**
++ * Provides a number of helper methods, mainly for working with Spark data frames.
++ * 
++ *
++ */
++
++public class SparkUtils {
++	static Logger logger = Logger.getLogger(SparkUtils.class.getName());
++
++    /**
++     * Creates a Spark data frame from a data set reference stored in a data set container.
++     * 
++     * @param spark Current Spark session
++     * @param dsc Data set container that keeps the reference to the input data set
++     * @param mds Metadata store used to resolve connection information
++     * @return Resulting Spark data frame
++     */
++	public static Dataset<Row> createDataFrame(SparkSession spark, DataSetContainer dsc, MetadataStore mds) {
++		Dataset<Row> df = null;
++		MetaDataObject ds = dsc.getDataSet();
++		if (ds instanceof DataFile) {
++			DataFile dataFile = (DataFile) ds;
++			logger.log(Level.INFO, MessageFormat.format("Reading DataFile {0} from URL {1}.",
++					new Object[] { dataFile.getName(), dataFile.getUrlString() }));
++			df = spark.read().format("csv").option("inferSchema", "true")
++					.option("header", "true").load(dataFile.getUrlString());
++		} else if (ds instanceof Table) {
++			Table table = (Table) ds;
++			MetadataStore availableMetadataStore;
++			if (mds.testConnection() == MetadataStore.ConnectionStatus.OK) {
++				availableMetadataStore = mds;
++			} else if (dsc.getMetaDataCache() != null) {
++				availableMetadataStore = new CachedMetadataStore(dsc.getMetaDataCache()); 
++			} else {
++				throw new RuntimeException("Discovery service has no access to the metadata store and no metadata cache is available.");
++			}
++			JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) availableMetadataStore.getConnectionInfo(table);
++			List<Connection> connections = connectionInfo.getConnections();
++			if (connections == null || connections.isEmpty()) {
++				// No connection information is attached to the relational table that was passed to the discovery service.
++				// This is typically caused by the fact that the Spark discovery service cannot access the ODF metadata API in order to retrieve cached objects
++				String msg = "Spark discovery service cannot access the ODF metadata API. Make sure that the ODF REST API is accessible from the discovery service running on the Spark cluster.";
++				logger.log(Level.SEVERE, msg);
++				throw new RuntimeException(msg);
++			}
++			JDBCConnection jdbcConnection = null;
++			for (Connection connection : connections) {
++				if (connection instanceof JDBCConnection) {
++					jdbcConnection = (JDBCConnection) connection;
++					break;
++				}
++			}
++			if (jdbcConnection == null) {
++				String msg = MessageFormat.format("No JDBC connection is attached to table {0}.", table.getName());
++				logger.log(Level.SEVERE, msg);
++				throw new RuntimeException(msg);
++			}
++			String driver = null;
++			try {
++				// Determine the JDBC driver class name needed for populating the DataFrame below
++				driver = DriverManager.getDriver(jdbcConnection.getJdbcConnectionString()).getClass().getName();
++				logger.log(Level.INFO, MessageFormat.format("JDBC driver class name is {0}.", driver));
++			} catch (SQLException e) {
++				String msg = MessageFormat.format("Error connecting to JDBC data source {0}: ",
++						jdbcConnection.getJdbcConnectionString());
++				logger.log(Level.WARNING, msg, e);
++				throw new RuntimeException(msg + Utils.exceptionString(e));
++			}
++			String schemaName = connectionInfo.getSchemaName();
++			String url = jdbcConnection.getJdbcConnectionString() + ":currentSchema=" + schemaName + ";user="
++					+ jdbcConnection.getUser() + ";password=" + jdbcConnection.getPassword() + ";";
++			String dbtable = schemaName + "." + table.getName();
++			String msg = "Using JDBC parameters url: {0}, dbtable: {1}, driver: {2} to connect to DB2 database.";
++			logger.log(Level.INFO, MessageFormat.format(msg, new Object[] { url, dbtable, driver }));
++			Map<String, String> options = new HashMap<String, String>();
++			options.put("url", url);
++			options.put("dbtable", dbtable);
++			options.put("driver", "com.ibm.db2.jcc.DB2Driver");
++			df = spark.read().format("jdbc").options(options).load();
++		}
++		return df;
++	}
++
++    /**
++     * Generates ODF annotations from a map of annotation data frames.
++     *
++     * @param container Data set container that contains the reference to the data set to be annotated
++     * @param annotationDataFrameMap Maps each annotation type to be created to the annotation data frame that contains the actual annotation data
++     * @param mds Metadata store used to resolve the columns of the annotated data set
++     * @return Result object that contains a list of ODF annotations
++     */
++	public static DiscoveryServiceResult createAnnotationsFromDataFrameMap(DataSetContainer container, Map<String, Dataset<Row>> annotationDataFrameMap, MetadataStore mds) throws RuntimeException {
++		RelationalDataSet tab = (RelationalDataSet) container.getDataSet();
++		DiscoveryServiceResult result = new DiscoveryServiceResult();
++
++		// Map input table columns to metadata object references
++		Map<String, MetaDataObjectReference> columnReferencesByName = new HashMap<>();
++
++		List<Column> colList ;
++		if (mds.testConnection() == MetadataStore.ConnectionStatus.OK) {
++			colList = mds.getColumns(tab);
++		} else if (container.getMetaDataCache() != null) {
++			CachedMetadataStore cacheReader = new CachedMetadataStore(container.getMetaDataCache());
++			colList = cacheReader.getColumns(tab);
++		} else {
++			throw new RuntimeException("Discovery service has no access to the metadata store and no metadata cache is available.");
++		}
++
++		for (MetaDataObject colMDO : colList) {
++			Column oMColumn = (Column) colMDO;
++			columnReferencesByName.put(oMColumn.getName(), oMColumn.getReference());
++		}
++
++		List<Annotation> annotations = new ArrayList<>();
++		Dataset<Row> df = null;
++		for (Map.Entry<String, Dataset<Row>> entry : annotationDataFrameMap.entrySet()) {
++			String annotationType = entry.getKey();
++			df = entry.getValue();
++			String columnToBeAnnotated = null;
++			int rowNumber = 0;
++			try {
++				List<Row> rows = df.collectAsList();
++				String[] columnNames = df.columns();
++				StructType st = df.schema();
++
++				for (rowNumber = 0; rowNumber < rows.size(); rowNumber++) {
++					if (columnNames[0].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME)) {
++						// Generate column annotations by mapping DataFrame
++						// table column values to annotation properties
++						// Column ANNOTATION_PROPERTY_COLUMN_NAME represents the
++						// column to be annotated
++						columnToBeAnnotated = rows.get(rowNumber).getString(0);
++						MetaDataObjectReference annotatedColumn = columnReferencesByName.get(columnToBeAnnotated);
++						if (annotatedColumn != null) {
++							logger.log(Level.FINE, MessageFormat.format("Annotating column {0}:", columnToBeAnnotated));
++							annotations.add((Annotation) getAnnotation(st, columnNames, annotationType, rows.get(rowNumber),
++									annotatedColumn));
++						} else {
++							logger.log(Level.FINE, "Column " + columnToBeAnnotated
++									+ " returned by the Spark service does not match any column of the input data set.");
++						}
++					} else {
++						// Creating table annotations
++						logger.log(Level.INFO,
++								MessageFormat.format(
++										"Data frame does not contain column {0}. Creating table annotations.",
++										DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME));
++						annotations.add((Annotation) getAnnotation(st, columnNames, annotationType, rows.get(rowNumber),
++								container.getDataSet().getReference()));
++					}
++				}
++			} catch (JSONException exc) {
++				String msg = MessageFormat.format(
++						"Error processing results returned by DataFrame row {0} column {1}. See ODF application lof for details.",
++						new Object[] { rowNumber, columnToBeAnnotated });
++				logger.log(Level.WARNING, msg);
++				throw new RuntimeException(msg, exc);
++			}
++		}
++		result.setAnnotations(annotations);
++		return result;
++	}
++
++    /**
++     * Creates a single ODF annotation from a row of input data. 
++     * 
++     * @param st Data types of the annotation attributes
++     * @param columnNames Names of the annotation attributes
++     * @param annotationType Type of the annotation to be created
++     * @param row Input data that represents the values of the annotation attributes
++     * @param annotatedObject Reference to the metadata object to be annotated
++     * @return A single ODF annotation object
++     */
++	public static Annotation getAnnotation(StructType st, String[] columnNames, String annotationType, Row row,
++			MetaDataObjectReference annotatedObject) throws JSONException {
++		ProfilingAnnotation an = new ProfilingAnnotation();
++		an.setAnnotationType(annotationType);
++		an.setProfiledObject(annotatedObject);
++		JSONObject jsonProperties = new JSONObject();
++		for (int j = 0; j < columnNames.length; j++) {
++			if (!columnNames[j].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME)) {
++				if (columnNames[j].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_SUMMARY_COLUMN_NAME)) {
++					an.setSummary(row.getString(j));
++				} else {
++					String annotationPropertyName = columnNames[j];
++					DataType dataType = st.apply(annotationPropertyName).dataType();
++					if (dataType == DataTypes.IntegerType) {
++						jsonProperties.put(annotationPropertyName, row.getInt(j));
++					} else if (dataType == DataTypes.DoubleType) {
++						jsonProperties.put(annotationPropertyName, row.getDouble(j));
++					} else if (dataType == DataTypes.BooleanType) {
++						jsonProperties.put(annotationPropertyName, row.getBoolean(j));
++					} else if (dataType == DataTypes.FloatType) {
++						jsonProperties.put(annotationPropertyName, row.getFloat(j));
++					} else if (dataType == DataTypes.LongType) {
++						jsonProperties.put(annotationPropertyName, row.getLong(j));
++					} else if (dataType == DataTypes.ShortType) {
++						jsonProperties.put(annotationPropertyName, row.getShort(j));
++					} else {
++						// Return all other data types as String
++						jsonProperties.put(annotationPropertyName, row.getString(j));
++					}
++					logger.log(Level.FINE, "Set attribute " + annotationPropertyName + " to value " + row.get(j) + ".");
++				}
++			}
++		}
++		an.setJsonProperties(jsonProperties.toString());
++		return an;
++	}
++
++    /**
++     * Transposes a Spark data frame by replacing its rows with its columns. The first input column is expected to contain row labels;
++     * all other input columns are expected to hold numeric values represented as strings (non-numeric values become null).
++     * The first column of the resulting data frame contains the column names of the input data frame and is of data type String. All other output columns are of type Double.
++     *
++     * @param spark Current Spark session
++     * @param origDataFrame Data frame to be transposed
++     * @return Transposed data frame
++     */
++	public static Dataset<Row> transposeDataFrame(SparkSession spark, Dataset<Row> origDataFrame) {
++		Dataset<Row> transposedDataFrame = null;
++		String[] origColumnNames = origDataFrame.columns();
++		int origNumberColumns = origColumnNames.length;
++		List<Row> origRows = origDataFrame.collectAsList();
++		int origNumberRows = origRows.size();
++		List<Row> transposedRows = new ArrayList<Row>();
++
++		// Loop through columns of original DataFrame
++		for (int i = 1; i < origNumberColumns; i++) {
++			Object[] transposedRow = new Object[origNumberRows + 1];
++			transposedRow[0] = origColumnNames[i];
++			// Loop through rows of original DataFrame
++			for (int j = 0; j < origNumberRows; j++) {
++				if (origRows.get(j).getString(i) == null) {
++					transposedRow[j + 1] = null;
++				} else {
++					try {
++						transposedRow[j + 1] = Double.parseDouble(origRows.get(j).getString(i));
++					} catch(NumberFormatException e) {
++						if (logger.getLevel() == Level.FINEST) {
++							String msg = MessageFormat.format("Cannot convert DataFrame column {0} row {1} value ''{2}'' to Double.", new Object[] { i, j, origRows.get(j).getString(i) });
++							logger.log(Level.FINEST, msg);
++						}
++						// Return null for all non-numeric fields
++						transposedRow[j + 1] = null;
++					}
++				}
++			}
++			transposedRows.add(RowFactory.create(transposedRow));
++		}
++
++		// Store original column name in first column of transposed DataFrame
++		StructField[] transposedColumnNames = new StructField[origNumberRows + 1];
++		transposedColumnNames[0] = DataTypes.createStructField(origColumnNames[0], DataTypes.StringType, false);
++		for (int j = 0; j < origNumberRows; j++) {
++			transposedColumnNames[j + 1] = DataTypes.createStructField(origRows.get(j).getString(0), DataTypes.DoubleType, false);
++		}
++		StructType st = DataTypes.createStructType(transposedColumnNames);
++		transposedDataFrame = spark.createDataFrame(transposedRows, st);
++		return transposedDataFrame;
++	}
++}
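++
++/* Data frame convention sketch (illustrative column and property names): for column
++ * annotations, createAnnotationsFromDataFrameMap expects the reserved column named
++ * DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME as the FIRST column
++ * of each annotation data frame; it names the input column to annotate, and every
++ * other column becomes a JSON property of the generated annotation:
++ *
++ *   | <ANNOTATION_PROPERTY_COLUMN_NAME> | min     | max     |
++ *   | SALARY                            | 10000.0 | 90000.0 |
++ *
++ * If that reserved column is absent, one table-level annotation is created per row.
++ */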
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java
+new file mode 100755
+index 0000000..e4fcaa3
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java
+@@ -0,0 +1,114 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.api.utils;
++
++import java.io.IOException;
++import java.text.MessageFormat;
++import java.util.logging.FileHandler;
++import java.util.logging.Formatter;
++import java.util.logging.Handler;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++import java.util.logging.SimpleFormatter;
++
++/**
++ * Class to be used for log configuration.
++ * It reads the system property odf.logspec whose value must be of the form
++ * <Level>,<FilePattern>
++ * For instance
++ *   ALL,/tmp/odf-trace.log
++ *
++ *
++ */
++public class ODFLogConfig {
++
++	void log(String s) {
++		System.out.println(s);
++	}
++
++	public static class ODFFileHandler extends FileHandler {
++		public ODFFileHandler(String pattern) throws IOException {
++			super(pattern);
++		}
++	}
++
++	Handler createHandler(String odfLogFilePattern) {
++		Handler result = null;
++		Formatter formatter = new SimpleFormatter();
++		try {
++			// Single log file without size limit (java.util.logging.FileHandler defaults for a plain pattern)
++			result = new ODFFileHandler(odfLogFilePattern);
++			result.setFormatter(formatter);
++		} catch (Exception exc) {
++			exc.printStackTrace();
++			return null;
++		}
++		return result;
++	}
++
++	private ODFLogConfig() {
++		try {
++			String logSpec = System.getProperty("odf.logspec");
++			log("ODF Logging spec of system property odf.logspec: " + logSpec);
++			if (logSpec == null) {
++				logSpec = System.getenv("ODFLOGSPEC");
++				log("ODF Logging spec of env var ODFLOGSPEC " + logSpec);
++				if (logSpec == null) {
++					return;
++				}
++			}
++			int ix = logSpec.indexOf(",");
++			if (ix == -1) {
++				return;
++			}
++			String levelString = logSpec.substring(0, ix);
++
++			String odfLogFilePattern = logSpec.substring(ix + 1);
++			String msg = MessageFormat.format("Configuring ODF logging with level {0} and log file: {1}", new Object[] { levelString, odfLogFilePattern });
++			log(msg);
++
++			Handler odfHandler = createHandler(odfLogFilePattern);
++			if (odfHandler == null) {
++				return;
++			}
++			Level level = Level.parse(levelString);
++			Logger odfRootLogger = Logger.getLogger("org.apache.atlas.odf");
++
++			// remove any existing ODF file handlers
++			for (Handler h : odfRootLogger.getHandlers()) {
++				if (h instanceof ODFFileHandler) {
++					odfRootLogger.removeHandler(h);
++				}
++			}
++
++			odfRootLogger.setLevel(level);
++			odfHandler.setLevel(level);
++			odfRootLogger.addHandler(odfHandler);
++			log("ODF logger configured.");
++		} catch (Exception exc) {
++			exc.printStackTrace();
++		}
++	}
++
++	static Object lockObject = new Object();
++	static ODFLogConfig config = null;
++
++	public static void run() {
++		synchronized (lockObject) {
++			if (config == null) {
++				config = new ODFLogConfig();
++			}
++		}
++	}
++}
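++
++/* Usage sketch (illustrative path): enable tracing at JVM startup with
++ *
++ *   java -Dodf.logspec=FINE,/tmp/odf-trace.log ...
++ *
++ * and call ODFLogConfig.run() once early in the application; subsequent calls are
++ * no-ops because the configuration object is created only once under the lock.
++ */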
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java
+new file mode 100755
+index 0000000..6ea9c97
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java
+@@ -0,0 +1,165 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.json;
++
++import java.io.IOException;
++import java.lang.reflect.Field;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.Map.Entry;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import com.fasterxml.jackson.core.JsonParser;
++import com.fasterxml.jackson.core.JsonProcessingException;
++import com.fasterxml.jackson.databind.DeserializationContext;
++import com.fasterxml.jackson.databind.JsonNode;
++import com.fasterxml.jackson.databind.ObjectMapper;
++import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
++import com.fasterxml.jackson.databind.node.ObjectNode;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++
++/**
++ * The Jackson deserializer for Annotation objects.
++ */
++public class AnnotationDeserializer extends StdDeserializer<Annotation> {
++
++	private static final long serialVersionUID = -3143233438847937374L;
++	
++	Logger logger = Logger.getLogger(AnnotationDeserializer.class.getName());
++	
++	public AnnotationDeserializer() {
++		super(Annotation.class);
++	}
++
++	ClassLoader getClassLoader() {
++		return this.getClass().getClassLoader();
++	}
++	
++	@Override
++	public Annotation deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
++		ObjectMapper jpom = ((ObjectMapper) jp.getCodec());
++		ObjectNode tree = jpom.readTree(jp);
++		String jsonString = tree.toString();
++		Annotation result = null;
++
++		Class<? extends Annotation> javaClass = null;
++		JsonNode javaClassNode = tree.get("javaClass");
++		if (javaClassNode == null) {
++			throw new IOException("Cannot deserialize object because the javaClass attribute is missing: " + jsonString);
++		}
++		JsonNode jsonPropertiesNode = tree.get("jsonProperties");
++		String javaClassName = javaClassNode.asText();
++		if (javaClassName.equals(ProfilingAnnotation.class.getName())) {
++			javaClass = ProfilingAnnotation.class;
++		}
++		else if (javaClassName.equals(ClassificationAnnotation.class.getName())) {
++			javaClass = ClassificationAnnotation.class;
++		}
++		else if (javaClassName.equals(RelationshipAnnotation.class.getName())) {
++			javaClass = RelationshipAnnotation.class;
++		}
++		else {
++			try {
++				javaClass = (Class<? extends Annotation>) this.getClassLoader().loadClass(javaClassName);
++				if (jsonPropertiesNode != null && !jsonPropertiesNode.isNull()) { // unfold jsonProperties in case of specific annotations
++					JsonNode jsonPropertiesNodeUnfolded = null;
++					if (jsonPropertiesNode.isTextual()) {
++						jsonPropertiesNodeUnfolded = jpom.readTree(jsonPropertiesNode.asText());					
++					}
++					else {
++						jsonPropertiesNodeUnfolded = jsonPropertiesNode; 
++					}
++					JsonNode newJsonPropertiesNode = (JsonNode)jp.getCodec().createObjectNode();    // initialize new jsonProperties node
++					Field classFields[] = javaClass.getDeclaredFields();
++					HashSet<String> classFieldSet = new HashSet<String>();
++					for (Field f: classFields) {
++						f.setAccessible(true);
++						String fieldName = f.getName();
++						classFieldSet.add(fieldName);
++					}
++					Iterator<Entry<String,JsonNode>> jsonPropertiesFields = jsonPropertiesNodeUnfolded.fields();
++					while (jsonPropertiesFields.hasNext()) { 
++						Entry<String,JsonNode> field = jsonPropertiesFields.next();
++						String fieldName = field.getKey();
++						if (JSONUtils.annotationFields.contains(fieldName)) {
++							throw new IOException("Name conflict: Field name in jsonProperties matches predefined field [" + fieldName + "]");
++						}
++						JsonNode fieldValue = field.getValue();
++						if (classFieldSet.contains(fieldName)) {
++							tree.set(fieldName, fieldValue);							
++						}
++						else {
++							((ObjectNode)newJsonPropertiesNode).set(fieldName, field.getValue());							
++						}
++					}
++					// toString() is required here: textValue() returns null for an object node
++					tree.put("jsonProperties", newJsonPropertiesNode.toString());
++				}
++			} catch (ClassNotFoundException exc) {
++				String msg = MessageFormat.format("Java class ''{0}'' could not be deserialized automatically (probably because it is not on the classpath)", javaClassName);
++				logger.warning(msg);
++				logger.log(Level.FINE, msg, exc);
++			}
++			if (javaClass == null) {
++				if (tree.get("profiledObject") != null) {   // class not found -> create as instance of corresponding 'unknown' types
++					javaClass = ProfilingAnnotation.class;
++				}
++				else if (tree.get("classifiedObject") != null) {
++					javaClass = ClassificationAnnotation.class;
++				}
++				else if (tree.get("relatedObjects") != null) {
++					javaClass = RelationshipAnnotation.class;
++				}
++				else { // malformed annotation
++					javaClass = Annotation.class;
++				}
++				if (jsonPropertiesNode == null) {
++					jsonPropertiesNode = (JsonNode)jp.getCodec().createObjectNode(); // initialize if not already present
++				}
++				Iterator<Entry<String,JsonNode>> fields = tree.fields();
++				ArrayList<String> fieldsToRemove = new ArrayList<String>();
++				try {
++					while (fields.hasNext()) {     // move all fields not present in the predefined annotation types
++						Entry<String,JsonNode> field = fields.next();   // to the string valued jsonProperties attribute
++						String fieldName = field.getKey();
++						if (!JSONUtils.annotationFields.contains(fieldName)) {
++							((ObjectNode)jsonPropertiesNode).set(fieldName, field.getValue());
++							fieldsToRemove.add(fieldName);
++						}
++					}
++					String jsonProperties = (jsonPropertiesNode.isTextual()) ? jsonPropertiesNode.textValue() : jsonPropertiesNode.toString();
++					tree.put("jsonProperties", jsonProperties); 
++					for (String fieldToRemove:fieldsToRemove) {  // remove fields not present in the predefined annotation types
++						tree.remove(fieldToRemove);
++					}
++				}
++				catch (Exception e) {
++					throw new IOException(e);
++				}
++			}
++			jsonString = tree.toString();				
++		}
++		result = jpom.readValue(jsonString, javaClass);
++		logger.log(Level.FINEST, "Annotation created. Original: {0}, deserialized annotation: {1}", new Object[]{ jsonString, JSONUtils.lazyJSONSerializer(result)});
++		return result;
++	}
++
++}
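To illustrate the dispatch on the javaClass attribute, a hedged sketch (the JSON and the missing class name are made up; JSONUtils is the helper introduced later in this patch, and fromJSON() declares JSONException):

    // The named class is not on the classpath, so the deserializer falls back:
    // because the JSON carries a "profiledObject" field it is read as a
    // ProfilingAnnotation, and non-predefined fields move into jsonProperties.
    String json = "{\"javaClass\": \"com.example.MissingAnnotation\","
            + " \"profiledObject\": null, \"someExtraField\": 42}";
    Annotation annot = JSONUtils.fromJSON(json, Annotation.class);
    // annot.getClass() == ProfilingAnnotation.class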
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java
+new file mode 100755
+index 0000000..6fcc28e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java
+@@ -0,0 +1,121 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.json;
++
++import java.io.IOException;
++import java.lang.reflect.Field;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import com.fasterxml.jackson.core.JsonGenerator;
++import com.fasterxml.jackson.core.JsonProcessingException;
++import com.fasterxml.jackson.databind.SerializerProvider;
++import com.fasterxml.jackson.databind.ser.std.StdSerializer;
++
++/**
++ * The Jackson serializer for Annotation objects.
++ */
++public class AnnotationSerializer extends StdSerializer<Annotation> {
++
++	public AnnotationSerializer() {
++		this(null);
++	}
++	
++	public AnnotationSerializer(Class<Annotation> t) {
++		super(t);
++	}
++	
++	Logger logger = Logger.getLogger(AnnotationSerializer.class.getName());
++
++	ClassLoader getClassLoader() {
++		return this.getClass().getClassLoader();
++	}
++	
++	// In the following, jsonProperties is either already pre-populated (because we are serializing an instance of ProfilingAnnotation itself)
++	// or it is assembled from all attributes that are not declared on ProfilingAnnotation or its ancestors (e.g. when serializing an
++	// instance of an annotation subclass); in the latter case the jsonProperties field itself is expected to be null.
++	
++	@Override
++	public void serialize(Annotation annot, JsonGenerator jg, SerializerProvider sp) throws IOException, JsonProcessingException {
++		jg.writeStartObject();
++		Class<?> cl = annot.getClass();
++		class JSONPropField {
++			String name;
++			Object value;
++			JSONPropField(String name, Object value) {this.name = name; this.value = value;}
++		}
++		ArrayList<JSONPropField> jsonPropFields = null;
++		String jsonPropertiesValue = null;
++		while (cl != Object.class) {   // process class hierarchy up to and including MetaDataObject.class
++			Field fields[] = cl.getDeclaredFields();
++			for (Field f: fields) {
++				f.setAccessible(true);
++				String fieldName = f.getName();
++				try {
++					Object fieldValue = f.get(annot);
++					if (fieldName.equals("jsonProperties")) {
++						jsonPropertiesValue = (String)fieldValue;
++					}
++					else if (JSONUtils.annotationFields.contains(fieldName)) {
++						jg.writeFieldName(fieldName);
++						jg.writeObject(fieldValue);							
++					}
++					else {
++						if (jsonPropFields == null) jsonPropFields = new ArrayList<JSONPropField>();
++						jsonPropFields.add(new JSONPropField(fieldName, fieldValue));
++					}
++				}
++				catch (IllegalAccessException e) {
++					throw new IOException(e);
++				}
++			}
++			cl = cl.getSuperclass();
++		}
++		jg.writeFieldName("jsonProperties");
++		if (jsonPropFields != null) {
++			jg.writeStartObject();
++			if (jsonPropertiesValue != null) {
++				try {
++					JSONObject jo = new JSONObject(jsonPropertiesValue);
++					Iterator<String> it = jo.keys();
++					while (it.hasNext()) {
++						String key = it.next();
++						jg.writeFieldName(key);
++						jg.writeObject(jo.get(key));
++					}
++				}
++				catch (JSONException e) {
++					throw new IOException(e);					
++				}
++			}
++			for (JSONPropField jpf:jsonPropFields) {
++				jg.writeFieldName(jpf.name);
++				jg.writeObject(jpf.value);								
++			}
++			jg.writeEndObject();				
++		}
++		else {
++			jg.writeString(jsonPropertiesValue);
++		}
++		jg.writeEndObject();
++	}
++
++}
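A sketch of the shape this serializer produces for an annotation subclass with its own field, using the NewAnnotation test class defined further down in this patch (values illustrative):

    NewAnnotation annot = new NewAnnotation();
    annot.setNewProp("v");
    // Predefined Annotation fields are written at the top level; subclass fields
    // are nested under jsonProperties. JSONUtils.toJSON() (below) then flattens
    // that object into the string-valued jsonProperties attribute:
    String json = JSONUtils.toJSON(annot);
    // ... "jsonProperties":"{\"newProp\":\"v\"}" ...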
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java
+new file mode 100755
+index 0000000..d1ae80e
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java
+@@ -0,0 +1,69 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.json;
++
++import java.io.IOException;
++import java.text.MessageFormat;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import com.fasterxml.jackson.core.JsonParser;
++import com.fasterxml.jackson.core.JsonProcessingException;
++import com.fasterxml.jackson.databind.DeserializationContext;
++import com.fasterxml.jackson.databind.JsonNode;
++import com.fasterxml.jackson.databind.ObjectMapper;
++import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
++
++public class DefaultODFDeserializer<T> extends StdDeserializer<T> {
++	private static final long serialVersionUID = 4895771352050172936L;
++
++	Logger logger = Logger.getLogger(DefaultODFDeserializer.class.getName());
++
++	Class<? extends T> defaultClass;
++
++	public DefaultODFDeserializer(Class<T> cl, Class<? extends T> defaultClass) {
++		super(cl);
++		this.defaultClass = defaultClass;
++	}
++
++	ClassLoader getClassLoader() {
++		return this.getClass().getClassLoader();
++	}
++
++	@Override
++	public T deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
++		ObjectMapper jpom = ((ObjectMapper) jp.getCodec());
++		JsonNode tree = jpom.readTree(jp);
++		String jsonString = tree.toString();
++
++		Class<? extends T> javaClass = null;
++		String javaClassName = null;
++		try {
++			JsonNode javaClassNode = tree.get("javaClass");
++			javaClassName = javaClassNode.asText();
++			logger.log(Level.FINEST, "Trying to deserialize object of java class {0}", javaClassName);
++			javaClass = (Class<? extends T>) this.getClassLoader().loadClass(javaClassName);
++			if (javaClass != null) {
++				if (!javaClass.equals(this.handledType())) {
++					return jpom.readValue(jsonString, javaClass);
++				}
++			}
++		} catch (Exception exc) {
++			String msg = MessageFormat.format("Java class ''{0}'' could not be deserialized automatically (probably because it is not on the classpath)", javaClassName);
++			logger.warning(msg);
++			logger.log(Level.FINE, msg, exc);
++		}
++		return jpom.readValue(jsonString, defaultClass);
++	}
++}
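The effect of the fallback class, sketched with types from this patch (the JSON and the missing class name are made up):

    // The named class cannot be loaded, so the deserializer logs a warning and
    // falls back to the default registered for DataSet, i.e. UnknownDataSet
    // (see JSONUtils.createDefaultObjectMapperModule() below).
    String json = "{\"javaClass\": \"com.example.MissingDataSet\", \"name\": \"ds\"}";
    DataSet ds = JSONUtils.fromJSON(json, DataSet.class);
    // ds.getClass() == UnknownDataSet.class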
+diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java
+new file mode 100755
+index 0000000..fe9d592
+--- /dev/null
++++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java
+@@ -0,0 +1,254 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.json;
++
++import java.io.ByteArrayOutputStream;
++import java.io.IOException;
++import java.io.InputStream;
++import java.lang.reflect.Field;
++import java.util.ArrayList;
++import java.util.HashSet;
++import java.util.List;
++
++import org.apache.wink.json4j.JSONArray;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import com.fasterxml.jackson.core.JsonProcessingException;
++import com.fasterxml.jackson.core.Version;
++import com.fasterxml.jackson.databind.Module;
++import com.fasterxml.jackson.databind.ObjectMapper;
++import com.fasterxml.jackson.databind.module.SimpleModule;
++import org.apache.atlas.odf.api.metadata.UnknownMetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
++import org.apache.atlas.odf.api.metadata.models.UnknownConnection;
++import org.apache.atlas.odf.api.metadata.models.UnknownConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataStore;
++
++public class JSONUtils {
++	
++	public static HashSet<String> annotationFields = new HashSet<String>();
++	
++	static {
++		for (Class<?> cl: new Class<?>[]{Annotation.class, ProfilingAnnotation.class, ClassificationAnnotation.class,RelationshipAnnotation.class}) {
++			while (cl != Object.class) {   // process class hierarchy up to and including MetaDataObject.class
++				Field fields[] = cl.getDeclaredFields();
++				for (Field f: fields) {
++					f.setAccessible(true);
++					annotationFields.add(f.getName());
++				}
++				cl = cl.getSuperclass();
++			}			
++		}
++	}
++
++
++
++	// reuse object mapper for performance
++	private static ObjectMapper om = null;
++
++	static {
++		om = new ObjectMapper();
++		Module mod = createDefaultObjectMapperModule();
++		om.registerModule(mod);
++	}
++
++	public static ObjectMapper getGlobalObjectMapper() {
++		return om;
++	}
++
++	static Module createDefaultObjectMapperModule() {
++		SimpleModule mod = new SimpleModule("ODF Jackson module", Version.unknownVersion());
++		mod.addDeserializer(Annotation.class, new AnnotationDeserializer());
++		mod.addDeserializer(MetaDataObject.class, new DefaultODFDeserializer<MetaDataObject>(MetaDataObject.class, UnknownMetaDataObject.class));
++		mod.addDeserializer(DataSet.class, new DefaultODFDeserializer<DataSet>(DataSet.class, UnknownDataSet.class));
++		mod.addDeserializer(DataStore.class, new DefaultODFDeserializer<DataStore>(DataStore.class, UnknownDataStore.class));
++		mod.addDeserializer(Connection.class, new DefaultODFDeserializer<Connection>(Connection.class, UnknownConnection.class));
++		mod.addDeserializer(ConnectionInfo.class, new DefaultODFDeserializer<ConnectionInfo>(ConnectionInfo.class, UnknownConnectionInfo.class));
++		
++		mod.addSerializer(Annotation.class, new AnnotationSerializer());
++		return mod;
++
++	}
++	
++	public static JSONObject toJSONObject(Object o) throws JSONException {
++		JSONObject result;
++		try {
++			result = new JSONObject(om.writeValueAsString(o));
++			if (o instanceof Annotation) {
++				Object jsonPropsObject = result.get("jsonProperties");
++				if (jsonPropsObject instanceof JSONObject) {    // the value of jsonProperties must be of type 'String'
++					result.put("jsonProperties", ((JSONObject)jsonPropsObject).toString());	
++				}
++			}
++		} catch (JsonProcessingException e) {
++			throw new JSONException(e);
++		}
++		return result;
++	}
++
++	public static String toJSON(Object o) throws JSONException {
++		String result;
++		try {
++			result = om.writeValueAsString(o);
++			if (o instanceof Annotation) {
++				JSONObject json = new JSONObject(result);
++				Object jsonPropsObject = json.get("jsonProperties");
++				if (jsonPropsObject instanceof JSONObject) {    // the value of jsonProperties must be of type 'String'
++					json.put("jsonProperties", ((JSONObject)jsonPropsObject).toString());	
++					result = json.toString();
++				}
++			}
++		} catch (JsonProcessingException e) {
++			throw new JSONException(e);
++		}
++		return result;
++	}
++
++	public static <T> List<T> fromJSONList(String s, Class<T> cl) throws JSONException {
++		JSONArray ar = new JSONArray(s);
++		List<T> result = new ArrayList<>();
++		for (Object o : ar) {
++			JSONObject jo = (JSONObject) o;
++			T t = (T) fromJSON(jo.write(), cl);
++			result.add(t);
++		}
++		return result;
++
++	}
++
++	public static <T> List<T> fromJSONList(InputStream is, Class<T> cl) throws JSONException {
++		JSONArray ar = new JSONArray(is);
++		List<T> result = new ArrayList<>();
++		for (Object o : ar) {
++			JSONObject jo = (JSONObject) o;
++			T t = (T) fromJSON(jo.write(), cl);
++			result.add(t);
++		}
++		return result;
++	}
++
++	public static <T> T fromJSON(String s, Class<T> cl) throws JSONException {
++		T result = null;
++		try {
++			result = om.readValue(s, cl);
++		} catch (JsonProcessingException exc) {
++			// propagate JSON exception
++			throw new JSONException(exc);
++		} catch (IOException e) {
++			throw new RuntimeException(e);
++		}
++
++		return result;
++	}
++
++	public static <T> T fromJSON(InputStream is, Class<T> cl) throws JSONException {
++		return fromJSON(getInputStreamAsString(is, "UTF-8"), cl);
++	}
++
++	public static <T> T readJSONObjectFromFileInClasspath(Class<T> cl, String pathToFile, ClassLoader classLoader) {
++		if (classLoader == null) {
++			// use current classloader if not provided
++			classLoader = JSONUtils.class.getClassLoader();
++		}
++		InputStream is = classLoader.getResourceAsStream(pathToFile);
++		T result = null;
++		try {
++			result = om.readValue(is, cl);
++		} catch (IOException e) {
++			// assume that this is a severe error since the provided JSONs should be correct
++			throw new RuntimeException(e);
++		}
++
++		return result;
++	}
++
++	public static <T> T cloneJSONObject(T obj) throws JSONException {
++		// special case: use Annotation.class in case obj is an annotation subclass to ensure that the annotation deserializer is used
++		if (Annotation.class.isAssignableFrom(obj.getClass())) {
++			return (T) fromJSON(toJSON(obj), Annotation.class);
++		}
++		return fromJSON(toJSON(obj), (Class<T>) obj.getClass());
++	}
++
++	
++	public static void mergeJSONObjects(JSONObject source, JSONObject target) {
++		if (source != null && target != null) {
++			target.putAll(source);
++		}
++	}
++
++	// use this method, e.g., if you want to use JSON objects in log / trace messages
++	// and want to do serialization only if tracing is on
++	public static Object lazyJSONSerializer(final Object jacksonObject) {
++		return new Object() {
++
++			@Override
++			public String toString() {
++				try {
++					return toJSON(jacksonObject);
++				} catch (JSONException e) {
++					return e.getMessage();
++				}
++			}
++
++		};
++	}
++
++	public static Object jsonObject4Log(final JSONObject obj) {
++		return new Object() {
++
++			@Override
++			public String toString() {
++				try {
++					return obj.write();
++				} catch (Exception e) {
++					return e.getMessage();
++				}
++			}
++
++		};
++	}
++
++	public static String getInputStreamAsString(InputStream is, String encoding) {
++		try {
++			// read in 2KB chunks and buffer instead of re-allocating a byte array per chunk
++			ByteArrayOutputStream buffer = new ByteArrayOutputStream();
++			byte[] chunk = new byte[2048];
++			int bytesRead;
++			while ((bytesRead = is.read(chunk)) != -1) {
++				buffer.write(chunk, 0, bytesRead);
++			}
++			return buffer.toString(encoding);
++		} catch (IOException exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++	
++	public static <T, S> T convert(S source, Class<T> targetClass) throws JSONException {
++		return fromJSON(toJSON(source), targetClass);
++	}
++}
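A short usage sketch for lazyJSONSerializer() above (logger name and payload are placeholders; Logger and Level are from java.util.logging):

    Logger logger = Logger.getLogger("org.apache.atlas.odf.example");
    DiscoveryServiceProperties props = new DiscoveryServiceProperties(); // placeholder payload
    // toJSON() only runs if the FINEST record is actually published:
    logger.log(Level.FINEST, "Props: {0}", JSONUtils.lazyJSONSerializer(props));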
+diff --git a/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java b/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java
+new file mode 100755
+index 0000000..da8d3af
+--- /dev/null
++++ b/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java
+@@ -0,0 +1,406 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.test.json;
++
++import java.text.ParseException;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.InvalidReference;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++import org.apache.wink.json4j.JSON;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Ignore;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ODFJSONSerializationTest {
++
++	Logger logger = Logger.getLogger(ODFJSONSerializationTest.class.getName());
++
++	MetaDataObjectReference createNewRef() {
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId(UUID.randomUUID().toString());
++		ref.setRepositoryId("odftestrepositoryid");
++		return ref;
++	}
++
++	static class NewAnnotation extends ProfilingAnnotation {
++		String newProp;
++
++		public String getNewProp() {
++			return newProp;
++		}
++
++		public void setNewProp(String newProp) {
++			this.newProp = newProp;
++		}
++
++	}
++
++	List<MetaDataObject> createTestObjects() throws JSONException, ParseException {
++		List<MetaDataObject> testObjects = new ArrayList<>();
++
++		Column col = new Column();
++		MetaDataObjectReference colref = createNewRef();
++		col.setReference(colref);
++		col.setName("col1");
++		col.setDescription("column desc");
++		col.setDataType("theDatatype");
++
++		Table t = new Table();
++		MetaDataObjectReference tableRef = createNewRef();
++		t.setReference(tableRef);
++		t.setName("Table");
++		t.setDescription("table desc");
++
++		Database db = new Database();
++		MetaDataObjectReference dbref = createNewRef();
++		db.setReference(dbref);
++		db.setName("DB");
++		db.setDescription("db description");
++
++		JDBCConnection jdbcConn = new JDBCConnection();
++		MetaDataObjectReference jdbcConnRef = createNewRef();
++		jdbcConn.setReference(jdbcConnRef);
++		jdbcConn.setName("jdbc connection");
++		jdbcConn.setUser("theUser");
++		jdbcConn.setPassword("thePassword");
++		jdbcConn.setJdbcConnectionString("jdbc:db2:localhost:50000/SAMPLE");
++		db.setConnections(Collections.singletonList(jdbcConnRef));
++
++		ProfilingAnnotation profAnnot1 = new ProfilingAnnotation();
++		MetaDataObjectReference uaRef = createNewRef();
++		profAnnot1.setReference(uaRef);
++		profAnnot1.setProfiledObject(jdbcConnRef);
++		profAnnot1.setJsonProperties("{\"a\": \"b\"}");
++
++		ProfilingAnnotation profAnnot2 = new ProfilingAnnotation();
++		MetaDataObjectReference mdoRef = createNewRef();
++		profAnnot2.setReference(mdoRef);
++		profAnnot2.setProfiledObject(jdbcConnRef);
++		profAnnot2.setJsonProperties("{\"a\": \"b\"}");
++
++		NewAnnotation newAnnot = new NewAnnotation();
++		MetaDataObjectReference newAnnotRef = createNewRef();
++		newAnnot.setReference(newAnnotRef);
++
++		// a generic DataSet
++		UnknownDataSet ds = new UnknownDataSet();
++		ds.setName("generic data set");
++		ds.setReference(createNewRef());
++
++		MetaDataObject[] mdos = new MetaDataObject[] { db, jdbcConn, t, col, profAnnot1, profAnnot2, newAnnot, ds };
++		testObjects.addAll(Arrays.asList(mdos));
++		return testObjects;
++	}
++
++	@Test
++	public void testSerialization() throws Exception {
++		List<MetaDataObject> testObjects = createTestObjects();
++
++		for (MetaDataObject testObject : testObjects) {
++			Class<?> cl = testObject.getClass();
++			logger.info("Testing serialization / deserialization of object: " + testObject + " of class: " + cl);
++
++			String json = JSONUtils.toJSON(testObject);
++			logger.info("Serialized json: " + json);
++
++			Object objStronglyTypedClass;
++			if (testObject instanceof Annotation) { // special treatment for Annotations -> 2nd arg of fromJSON() needs to be Annotation.class
++				objStronglyTypedClass = JSONUtils.fromJSON(json, Annotation.class);
++			}
++			else {
++				objStronglyTypedClass = JSONUtils.fromJSON(json, cl);
++			}
++			Assert.assertEquals(cl, objStronglyTypedClass.getClass());
++			String json1 = JSONUtils.toJSON(objStronglyTypedClass);
++			Assert.assertEquals(json, json1);
++
++			Object objWithGenericClass = JSONUtils.fromJSON(json, MetaDataObject.class);
++
++			Assert.assertEquals(cl, objWithGenericClass.getClass());
++			String json2 = JSONUtils.toJSON(objWithGenericClass);
++			Assert.assertEquals(json, json2);
++
++			Class<?> intermediateClasses[] = new Class<?>[] { MetaDataObject.class, DataSet.class, DataStore.class, Connection.class };
++
++			for (Class<?> intermediateClass : intermediateClasses) {
++				logger.info("Checking intermediate class: " + intermediateClass);
++				if (intermediateClass.isAssignableFrom(cl)) {
++
++					Object intermediateObject = JSONUtils.fromJSON(json, intermediateClass);
++					logger.info("Deserialized object: " + intermediateObject);
++					logger.info("Deserialized object class: " + intermediateObject.getClass());
++
++					Assert.assertTrue(intermediateClass.isAssignableFrom(intermediateObject.getClass()));
++					Assert.assertEquals(cl, intermediateObject.getClass());
++					String json3 = JSONUtils.toJSON(intermediateObject);
++					Assert.assertEquals(json, json3);
++				}
++			}
++
++		}
++	}
++
++	/**
++	 * Test serialization of an Annotation (subclass) which has both its own fields (to be mapped to jsonProperties) and
++	 * a non-empty jsonProperties attribute holding the string representation of a JSON object.
++	 */
++
++	@Test
++	public void testJsonPropertiesMerge() {
++		NewAnnotation annot = new NewAnnotation();
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId("id");
++		ref.setRepositoryId("repoid");
++		ref.setUrl("http://url");
++		annot.setProfiledObject(ref);
++		annot.setNewProp("newPropValue");
++		annot.setJsonProperties("{\"oldProp\":\"oldPropValue\"}");
++		JSONObject jo = null;
++		try {
++			jo = JSONUtils.toJSONObject(annot);
++			String jsonPropertiesString = jo.getString("jsonProperties");
++			JSONObject jo2 = new JSONObject(jsonPropertiesString);
++			Assert.assertEquals("oldPropValue", jo2.get("oldProp"));
++			Assert.assertEquals("newPropValue", jo2.get("newProp"));
++		}
++		catch (JSONException e) {
++			Assert.fail("Unexpected JSONException: " + e.getMessage());
++		}
++	}
++
++	final static private String MERGED_JSON = "{" +
++			"\"analysisRun\":null," +
++			"\"summary\":null," +
++			"\"reference\":null," +
++			"\"originRef\":null," +
++			"\"replicaRefs\":null," +
++			"\"javaClass\":\"org.apache.atlas.odf.test.json.ODFJSONSerializationTest$NewAnnotation\"," +
++			"\"jsonProperties\":\"{" +
++			   "\\\"newProp\\\":\\\"newPropValue\\\"," +
++			   "\\\"oldProp\\\":\\\"oldPropValue\\\"" +
++			   "}\"," +
++			"\"name\":null," +
++			"\"annotationType\":\"NewAnnotation\"," +
++			"\"description\":null," +
++			"\"profiledObject\":{" +
++			   "\"repositoryId\":\"repoid\"," +
++			   "\"id\":\"id\"," +
++			   "\"url\":\"http://url\"}" +
++	        "}";
++
++	/**
++	 * Test deserialization of a JSON object whose jsonProperties has fields that cannot be mapped to native fields of
++	 * the target class (= value of the javaClass field). These, and only these, remain as fields in the text-encoded JSON
++	 * object stored in the jsonProperties field of the result.
++	 */
++
++	@Test
++	@Ignore
++	public void testJsonPropertiesUnmerge() throws Exception {
++		logger.info("Deserializing JSON: " + MERGED_JSON);
++		Annotation annot = JSONUtils.fromJSON(MERGED_JSON, Annotation.class);
++		Assert.assertTrue(annot instanceof NewAnnotation);
++		NewAnnotation newAnnot = (NewAnnotation) annot;
++		Assert.assertEquals("newPropValue", newAnnot.getNewProp());
++		JSONObject props = (JSONObject) JSON.parse(annot.getJsonProperties());
++
++		Assert.assertNotNull(props.get("oldProp"));
++		Assert.assertEquals("oldPropValue", props.get("oldProp"));
++
++		JSONObject jo = JSONUtils.toJSONObject(annot);
++		Assert.assertEquals(MERGED_JSON, jo.toString());
++	}
++
++	final private static String PROFILING_ANNOTATION_JSON = "{" +
++			"\"profiledObject\": null," +
++			"\"annotationType\": \"MySubType1\"," +
++			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType1\"," +
++			"\"analysisRun\": \"bla\"," +
++			"\"newProp1\": 42," +
++			"\"newProp2\": \"hi\"," +
++			"\"newProp3\": \"hello\"" +
++		"}";
++
++	final private static String CLASSIFICATION_ANNOTATION_JSON = "{" +
++			"\"classifyingObject\": null," +
++			"\"classifiedObject\": null," +
++			"\"annotationType\": \"MySubType2\"," +
++			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType2\"," +
++			"\"analysisRun\": \"bla\"," +
++			"\"newProp1\": 42," +
++			"\"newProp2\": \"hi\"," +
++			"\"newProp3\": \"hello\"" +
++		"}";
++
++	final private static String RELATIONSHIP_ANNOTATION_JSON = "{" +
++			"\"relatedObjects\": null," +
++			"\"annotationType\": \"MySubType3\"," +
++			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType3\"," +
++			"\"analysisRun\": \"bla\"," +
++			"\"newProp1\": 42," +
++			"\"newProp2\": \"hi\"," +
++			"\"newProp3\": \"hello\"" +
++		"}";
++
++	/**
++	 * Replacement for AtlasAnnotationTypeDefinitionCreatTest.
++	 */
++
++	@Test
++	public void testSimpleAnnotationPrototypeCreation() throws Exception {
++		logger.info("Annotation string: " + PROFILING_ANNOTATION_JSON);
++		Annotation annot = JSONUtils.fromJSON(PROFILING_ANNOTATION_JSON, Annotation.class);
++		logger.info("Annotation: " + PROFILING_ANNOTATION_JSON);
++		Assert.assertTrue(annot instanceof ProfilingAnnotation);
++
++		logger.info("Annotation string: " + CLASSIFICATION_ANNOTATION_JSON);
++		annot = JSONUtils.fromJSON(CLASSIFICATION_ANNOTATION_JSON, Annotation.class);
++		logger.info("Annotation: " + CLASSIFICATION_ANNOTATION_JSON);
++		Assert.assertTrue(annot instanceof ClassificationAnnotation);
++
++		logger.info("Annotation string: " + RELATIONSHIP_ANNOTATION_JSON);
++		annot = JSONUtils.fromJSON(RELATIONSHIP_ANNOTATION_JSON, Annotation.class);
++		logger.info("Annotation: " + RELATIONSHIP_ANNOTATION_JSON);
++		Assert.assertTrue(annot instanceof RelationshipAnnotation);
++	}
++
++	@Test
++	public void testUnretrievedReference() throws Exception {
++		String repoId = "SomeRepoId";
++		Column col = new Column();
++		col.setName("name");
++		col.setReference(InvalidReference.createInvalidReference(repoId));
++
++		String json = JSONUtils.toJSON(col);
++		Column col2 = JSONUtils.fromJSON(json, Column.class);
++		Assert.assertTrue(InvalidReference.isInvalidRef(col2.getReference()));
++
++		Database db = new Database();
++		db.setName("database");
++
++		JSONUtils.toJSON(db);
++
++		db.setConnections(InvalidReference.createInvalidReferenceList(repoId));
++
++		Database db2 = JSONUtils.fromJSON(JSONUtils.toJSON(db), Database.class);
++		Assert.assertTrue(InvalidReference.isInvalidRefList(db2.getConnections()));
++	}
++
++	@Test
++	public void testExtensibleDiscoveryServiceEndpoints() throws Exception {
++		DiscoveryServiceProperties dsprops = new DiscoveryServiceProperties();
++		dsprops.setId("theid");
++		dsprops.setName("thename");
++
++		DiscoveryServiceEndpoint ep = new DiscoveryServiceEndpoint();
++		ep.setRuntimeName("newruntime");
++		ep.set("someKey", "someValue");
++		dsprops.setEndpoint(ep);
++
++		String dspropsJSON = JSONUtils.toJSON(dsprops);
++		logger.info("Discovery service props JSON: " +dspropsJSON);
++
++		DiscoveryServiceProperties deserProps = JSONUtils.fromJSON(dspropsJSON, DiscoveryServiceProperties.class);
++		Assert.assertNotNull(deserProps);
++		Assert.assertEquals("theid", deserProps.getId());
++		Assert.assertEquals("thename", deserProps.getName());
++		Assert.assertNotNull(deserProps.getEndpoint());
++		Assert.assertTrue(deserProps.getEndpoint() instanceof DiscoveryServiceEndpoint);
++		Assert.assertTrue(deserProps.getEndpoint().getClass().equals(DiscoveryServiceEndpoint.class));
++		DiscoveryServiceEndpoint deserEP = (DiscoveryServiceEndpoint) deserProps.getEndpoint();
++		Assert.assertEquals("newruntime", deserEP.getRuntimeName());
++		Assert.assertEquals("someValue", deserEP.get().get("someKey"));
++	}
++
++	@Test
++	public void testMetaDataCache() {
++		MetaDataCache cache = new MetaDataCache();
++
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId("id");
++		ref.setRepositoryId("repositoryId");
++		DataFile dataFile = new DataFile();
++		dataFile.setName("dataFile");
++		dataFile.setEncoding("encoding");
++		dataFile.setReference(ref);
++
++		List<MetaDataObjectReference> refList = new ArrayList<MetaDataObjectReference>();
++		refList.add(ref);
++		StoredMetaDataObject storedObject = new StoredMetaDataObject(dataFile);
++		HashMap<String, List<MetaDataObjectReference>> referenceMap = new HashMap<String, List<MetaDataObjectReference>>();
++		referenceMap.put("id", refList);
++		storedObject.setReferencesMap(referenceMap);
++		List<StoredMetaDataObject> metaDataObjects = new ArrayList<StoredMetaDataObject>();
++		metaDataObjects.add(storedObject);
++		cache.setMetaDataObjects(metaDataObjects);
++
++		Connection con = new JDBCConnection();
++		con.setName("connection");
++		JDBCConnectionInfo conInfo = new JDBCConnectionInfo();
++		conInfo.setConnections(Collections.singletonList(con));
++		conInfo.setAssetReference(ref);
++		conInfo.setTableName("tableName");
++		List<ConnectionInfo> connectionInfoObjects = new ArrayList<ConnectionInfo>();
++		connectionInfoObjects.add(conInfo);
++		cache.setConnectionInfoObjects(connectionInfoObjects);
++
++		try {
++			String serializedCache = JSONUtils.toJSON(cache);
++			logger.info("Serialized metadata cache JSON: " + serializedCache);
++			MetaDataCache deserializedCache = JSONUtils.fromJSON(serializedCache, MetaDataCache.class);
++			Assert.assertEquals("dataFile", deserializedCache.getMetaDataObjects().get(0).getMetaDataObject().getName());
++			Assert.assertEquals("encoding", ((DataFile) deserializedCache.getMetaDataObjects().get(0).getMetaDataObject()).getEncoding());
++			Assert.assertEquals("connection", deserializedCache.getConnectionInfoObjects().get(0).getConnections().get(0).getName());
++			Assert.assertEquals("tableName", ((JDBCConnectionInfo) deserializedCache.getConnectionInfoObjects().get(0)).getTableName());
++			Assert.assertEquals("repositoryId", deserializedCache.getMetaDataObjects().get(0).getReferenceMap().get("id").get(0).getRepositoryId());
++		}
++		catch (JSONException e) {
++			Assert.fail("Unexpected JSONException: " + e.getMessage());
++		}
++	}
++
++
++}
+diff --git a/odf/odf-archetype-discoveryservice/.gitignore b/odf/odf-archetype-discoveryservice/.gitignore
+new file mode 100755
+index 0000000..70349b0
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/.gitignore
+@@ -0,0 +1,4 @@
++.settings
++target
++.classpath
++.project
+diff --git a/odf/odf-archetype-discoveryservice/pom.xml b/odf/odf-archetype-discoveryservice/pom.xml
+new file mode 100755
+index 0000000..c9c2aed
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/pom.xml
+@@ -0,0 +1,52 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-archetype-discoveryservice</artifactId>
++	<packaging>maven-archetype</packaging>
++
++	<description>The ODF maven archetype for discovery services</description>
++
++	<properties>
++		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
++	</properties>
++
++	<build>
++		<extensions>
++			<extension>
++				<groupId>org.apache.maven.archetype</groupId>
++				<artifactId>archetype-packaging</artifactId>
++				<version>2.4</version>
++			</extension>
++		</extensions>
++
++		<pluginManagement>
++			<plugins>
++				<plugin>
++					<artifactId>maven-archetype-plugin</artifactId>
++					<version>2.4</version>
++				</plugin>
++			</plugins>
++		</pluginManagement>
++	</build>
++
++</project>
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml b/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml
+new file mode 100755
+index 0000000..9848e46
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml
+@@ -0,0 +1,27 @@
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<archetype>
++  <id>odf-archetype-discoveryservice-jar</id>
++  <sources>
++    <source>src/main/java/MyAnnotation.java</source>
++    <source>src/main/java/MyDiscoveryService.java</source>
++  </sources>
++  <resources>
++    <resource>src/main/resources/META-INF/odf/odf-services.json</resource>
++  </resources>
++  <testSources>
++    <source>src/test/java/MyDiscoveryServiceTest.java</source>
++  </testSources>
++</archetype>
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml
+new file mode 100755
+index 0000000..0ada9e8
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml
+@@ -0,0 +1,42 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++	<groupId>${groupId}</groupId>
++	<artifactId>${artifactId}</artifactId>
++	<version>${version}</version>
++	<packaging>jar</packaging>
++
++	<properties>
++		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
++	</properties>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++	</dependencies>
++</project>
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java
+new file mode 100755
+index 0000000..f75474e
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java
+@@ -0,0 +1,17 @@
++package ${package};
++
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++
++public class MyAnnotation extends ProfilingAnnotation {
++
++	private String myProperty;
++
++	public String getMyProperty() {
++		return myProperty;
++	}
++
++	public void setMyProperty(String myValue) {
++		this.myProperty = myValue;
++	}
++
++}
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java
+new file mode 100755
+index 0000000..1498e7e
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java
+@@ -0,0 +1,33 @@
++package ${package};
++
++import java.util.Collections;
++import java.util.Date;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
++import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++/**
++ * A simple synchronous discovery service that creates one annotation for the data set it analyzes.
++ *
++ */
++public class MyDiscoveryService extends SyncDiscoveryServiceBase {
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		// 1. create an annotation that annotates the data set object passed in the request
++		MyAnnotation annotation = new MyAnnotation();
++		annotation.setProfiledObject(request.getDataSetContainer().getDataSet().getReference());
++		// set the new property "myProperty" to some string
++		annotation.setMyProperty("My property was created on " + new Date());
++
++		// 2. create a response with our annotation created above
++		return createSyncResponse( //
++				ResponseCode.OK, // Everything works OK 
++				"Everything worked", // human-readable message
++				Collections.singletonList(annotation) // new annotations
++		);
++	}
++
++}
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json
+new file mode 100755
+index 0000000..e90ce7b
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json
+@@ -0,0 +1,11 @@
++[
++  {
++	"id": "${groupId}.${artifactId}.MyDiscoveryService",
++	"name": "My service",
++	"description": "My service creates my annotation for a data set",
++	"endpoint": {
++		"runtimeName": "Java",
++		"className": "${package}.MyDiscoveryService"
++	}
++  }
++]
+diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java
+new file mode 100755
+index 0000000..842830d
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java
+@@ -0,0 +1,15 @@
++package ${package};
++
++import org.junit.Assert;
++import org.junit.Test;
++
++/**
++ * Unit test template for discovery service
++ */
++public class MyDiscoveryServiceTest {
++	
++	@Test
++	public void test() throws Exception {
++		Assert.assertTrue(true);
++	}
++}
+diff --git a/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties
+new file mode 100755
+index 0000000..9fbb593
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties
+@@ -0,0 +1,23 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++archetype.groupId=org.apache.atlas.odf
++archetype.artifactId=odf-archetype-discoveryservice-jar
++archetype.version=1.2.0-SNAPSHOT
++
++groupId=jg1
++artifactId=ja1
++version=0.1
++package=odf.j.p1.p2
++
+diff --git a/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt
+new file mode 100755
+index 0000000..31ed2f8
+--- /dev/null
++++ b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt
+@@ -0,0 +1 @@
++clean verify
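For context, a discovery-service project would typically be generated from this archetype with the standard Maven archetype plugin; the coordinates below mirror archetype.properties above:

    mvn archetype:generate -DarchetypeGroupId=org.apache.atlas.odf \
        -DarchetypeArtifactId=odf-archetype-discoveryservice-jar \
        -DarchetypeVersion=1.2.0-SNAPSHOT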
+diff --git a/odf/odf-atlas/.gitignore b/odf/odf-atlas/.gitignore
+new file mode 100755
+index 0000000..7c65173
+--- /dev/null
++++ b/odf/odf-atlas/.gitignore
+@@ -0,0 +1,7 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++.DS_Store
++derby.log
+diff --git a/odf/odf-atlas/atlasconfig/jetty-web.xml b/odf/odf-atlas/atlasconfig/jetty-web.xml
+new file mode 100755
+index 0000000..13095a4
+--- /dev/null
++++ b/odf/odf-atlas/atlasconfig/jetty-web.xml
+@@ -0,0 +1,25 @@
++<!--
++~ (C) Copyright IBM Corp. 2017
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<Configure class="org.eclipse.jetty.webapp.WebAppContext">
++	<Get name="securityHandler">
++		<Set name="loginService">
++			<New class="org.eclipse.jetty.security.HashLoginService">
++				<Set name="name">ODF Realm</Set>
++				<Set name="config"><SystemProperty name="atlas.home" default="."/>/conf/realm.properties</Set>
++			</New>
++		</Set>
++	</Get>
++</Configure>
+diff --git a/odf/odf-atlas/atlasconfig/realm.properties b/odf/odf-atlas/atlasconfig/realm.properties
+new file mode 100755
+index 0000000..0d57c4a
+--- /dev/null
++++ b/odf/odf-atlas/atlasconfig/realm.properties
+@@ -0,0 +1,24 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# Credentials for Atlas basic authentication
++#
++# Format:
++# <username>: <password>[,<rolename> ...]
++#
++# Password is stored in obfuscated format.
++# Re-generate password using the org.eclipse.jetty.util.security.Password class in the jetty lib folder.
++# Example:
++# cd jetty-distribution-<version>/lib
++# java -cp jetty-util-<version>.jar org.eclipse.jetty.util.security.Password <plain password>
++atlas: OBF:1v1p1s3m1w1s1wtw1u3019q71u2a1wui1w1q1s3g1v2p,user
+diff --git a/odf/odf-atlas/build_atlas.xml b/odf/odf-atlas/build_atlas.xml
+new file mode 100755
+index 0000000..8b6de87
+--- /dev/null
++++ b/odf/odf-atlas/build_atlas.xml
+@@ -0,0 +1,265 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project name="build_atlas">
++	<dirname property="script.basedir" file="${ant.file.build_atlas}" />
++	<property name="atlas-dir" value="apache-atlas-${atlas.version}" />
++	<!-- Properties  provided by pom.xml: -->
++	<!-- <property name="atlas-unpack-dir" value="" /> -->
++	<!-- <property name="atlas.version" value="" /> -->
++
++	<property name="atlas-archive" value="/tmp/${atlas-dir}-bin.zip" />
++
++	<condition property="is-windows">
++		<os family="windows">
++		</os>
++	</condition>
++
++	<condition property="is-unix">
++		<os family="unix">
++		</os>
++	</condition>
++
++	<condition property="is-mac">
++		<os family="mac">
++		</os>
++	</condition>
++
++	<condition property="atlas-zip-not-found">
++		<not>
++			<available file="${atlas-archive}">
++			</available>
++		</not>
++	</condition>
++
++	<condition property="atlas-unpacked">
++		<available file="${atlas-unpack-dir}/${atlas-dir}/bin/atlas_start.py"/>
++	</condition>
++
++	<condition property="atlas-running">
++		<available file="${atlas-unpack-dir}/${atlas-dir}/logs/atlas.pid"/>
++	</condition>
++
++	<condition property="running-build-process">
++		<equals arg1="${atlas-unpack-dir}" arg2="/tmp"/>
++	</condition>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="download-atlas" if="atlas-zip-not-found">
++		<echo message="Downloading Apache Atlas 0.7-incubating-release. Depending on your network this can take up to 20 (yes, twenty) minutes." />
++		<!-- Make sure to update text message when moving to a new Atlas release / revision -->
++		<get verbose="true" src="https://ibm.box.com/shared/static/ftwi0wlpjtyv3nnvyh354epayqfwynsn.zip" dest="${atlas-archive}" />
++		<echo message="Atlas downloaded" />
++	</target>
++
++	<target name="unzip-atlas" unless="atlas-unpacked">
++		<antcall target="download-atlas"/>
++		<echo message="Installing Atlas test instance" />
++		<echo message="Deleting ${atlas-unpack-dir}/${atlas-dir}" />
++		<delete dir="${atlas-unpack-dir}/${atlas-dir}" failonerror="false" />
++		<echo message="deleted" />
++		<chmod file="${atlas-archive}" perm="755" os="unix,mac"/>
++		<unzip src="${atlas-archive}" dest="${atlas-unpack-dir}" />
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="stop-atlas" if="atlas-unpacked">
++		<echo message="Stopping atlas server if it exists" />
++		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
++			<env key="JAVA_HOME" value="${java.home}" />
++			<arg value="atlas_stop.py" />
++		</exec>
++		<sleep seconds="10" />
++	</target>
++
++	<target name="ensure-atlas-stopped" depends="print-info" unless="use.running.atlas">
++		<echo message="Ensure Atlas is stopped..."/>
++		<antcall target="stop-atlas"/>
++		<delete file="${atlas-unpack-dir}/${atlas-dir}/logs/atlas.pid"/>
++		<echo message="Atlas is stopped."/>
++	</target>
++
++	<target name="remove-atlas-dir" depends="ensure-atlas-stopped" if="running-build-process">
++		<echo message="Resetting atlas data"/>
++		<delete dir="/tmp/${atlas-dir}" />
++		<echo message="Atlas directory deleted"/>
++	</target>
++
++	<target name="reset-derby-data">
++		<echo message="Resetting derby DB"/>
++		<delete dir="/tmp/odf-derby" />
++	</target>
++
++	<target name="restart-atlas-on-windows" if="is-windows">
++		<antcall target="start-atlas"/>
++		<antcall target="stop-atlas"/>
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="start-atlas">
++		<echo message="Starting atlas server" />
++		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
++			<env key="JAVA_HOME" value="${java.home}/.." />
++			<arg value="atlas_start.py" />
++		</exec>
++		<echo message="Waiting for Atlas Server to start..." />
++		<waitfor maxwait="60" maxwaitunit="second">
++			<socket server="localhost" port="21443" />
++		</waitfor>
++	</target>
++
++	<target name="check-atlas-url">
++		<fail>
++			<condition>
++				<not>
++					<socket server="localhost" port="21443" />
++				</not>
++			</condition>
++		</fail>
++	</target>
++
++	<target name="prepare-atlas" unless="atlas-running">
++		<antcall target="unzip-atlas"/>
++		<antcall target="enable-atlas-ssl"/>
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="import-atlas-sampledata-win" if="is-windows">
++		<echo message="Importing sample data" />
++		<exec executable="cmd">
++			<env key="JAVA_HOME" value="${java.home}" />
++			<arg value="/c" />
++			<arg value="${atlas-unpack-dir}/${atlas-dir}/bin/quick_start.py" />
++		</exec>
++
++		<echo message="Atlas test instance brought up" />
++	</target>
++
++	<target name="import-atlas-sampledata-unix" if="is-unix">
++		<echo message="Importing sample data" />
++		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
++			<env key="JAVA_HOME" value="${java.home}" />
++			<arg value="quick_start.py" />
++		</exec>
++
++		<echo message="Atlas test instance brought up" />
++	</target>
++
++	<target name="import-atlas-sampledata" depends="import-atlas-sampledata-win,import-atlas-sampledata-unix">
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="select-atlas-config-file-windows" if="is-windows">
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_windows" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
++		<echo message="Using atlas SSL configuration for Windows." />
++	</target>
++
++	<target name="select-atlas-config-file-mac" if="is-mac">
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_mac" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
++		<echo message="Using atlas SSL configuration for Mac OS." />
++	</target>
++
++	<target name="select-atlas-config-file-unix" if="is-unix">
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_linux" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
++		<echo message="Using atlas SSL configuration for Unix." />
++	</target>
++
++	<target name="select-atlas-config-file" depends="select-atlas-config-file-unix,select-atlas-config-file-windows,select-atlas-config-file-mac">
++	</target>
++
++	<target name="unquote-colons-in-atlas-config-file">
++		<!-- The following replacement is needed because the ant propertyfile task quotes colons and backslashes -->
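++		<!-- Example: the propertyfile task writes values like "jceks\://file/..." which the replacements below turn back into "jceks://file/..." -->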
++		<replace file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
++			<replacetoken>\:</replacetoken>
++			<replacevalue>:</replacevalue>
++		</replace>
++		<replace file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
++			<replacetoken>\\</replacetoken>
++			<replacevalue>\</replacevalue>
++		</replace>
++	</target>
++
++	<target name="enable-atlas-ssl">
++		<!-- For Atlas security features see: http://atlas.incubator.apache.org/Security.html -->
++		<echo message="Updating atlas-application.properties file..." />
++		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
++			<entry  key="cert.stores.credential.provider.path" value="jceks://file/${sys:atlas.home}/conf/keystore_openjdk.jceks"/>
++			<entry  key="atlas.enableTLS" value="true"/>
++			<entry  key="truststore.file" value="${sys:atlas.home}/conf/keystore_openjdk.jks"/>
++			<entry  key="keystore.file" value="${sys:atlas.home}/conf/keystore_openjdk.jks"/>
++			<entry  key="atlas.server.https.port" value="21443"/>
++			<entry  key="atlas.DeleteHandler.impl" value="org.apache.atlas.repository.graph.HardDeleteHandler"/>
++			<entry  key="atlas.TypeCache.impl" value="org.apache.atlas.repository.typestore.StoreBackedTypeCache"/>
++		</propertyfile>
++		<antcall target="unquote-colons-in-atlas-config-file"/>
++		<!-- Keep this version of the config file for Mac (using oracle/open jdk) -->
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_mac" overwrite="true"/>
++
++		<!-- Create separate version of config file for Linux (using ibm jdk) -->
++		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
++			<entry  key="cert.stores.credential.provider.path" value="jceks://file/${sys:atlas.home}/conf/keystore_ibmjdk.jceks"/>
++			<entry  key="truststore.file" value="${sys:atlas.home}/conf/keystore_ibmjdk.jks"/>
++			<entry  key="keystore.file" value="${sys:atlas.home}/conf/keystore_ibmjdk.jks"/>
++		</propertyfile>
++		<antcall target="unquote-colons-in-atlas-config-file"/>
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_linux" overwrite="true"/>
++
++		<!-- Create separate version of config file for Windows (using ibm jdk and hardcoded credential provider file (issue #94)) -->
++		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
++			<entry  key="cert.stores.credential.provider.path" value="jceks://file/C\:/tmp/${atlas-dir}/conf/keystore_ibmjdk.jceks"/>
++		</propertyfile>
++		<antcall target="unquote-colons-in-atlas-config-file"/>
++		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_windows" overwrite="true"/>
++
++		<!-- keystore.jceks file is stored in Box@IBM - Re-generate the file using Atlas command bin/cputil.sh -->
++		<!-- Note that the ibm jdk uses a different format than the oracle/open jdk, therefore a separate version has to be generated for each jdk -->
++		<get verbose="true" src="https://ibm.box.com/shared/static/uyzqeayk5ut5f5fqnlvm8nhn9ixb642d.jceks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_openjdk.jceks" />
++		<get verbose="true" src="https://ibm.box.com/shared/static/ibopoyukw7uhbt83a1zu33nwvnamht3j.jceks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_ibmjdk.jceks" />
++		<!-- keystore.jks file is stored in Box@IBM - Re-generate the file using the Java keytool -->
++		<!-- command: keytool -genkey -alias myatlas -keyalg RSA -keystore /tmp/atlas-security/keystore.jks -keysize 2048 -->
++		<!-- Note that the ibm jdk uses a different format than the oracle/open jdk, therefore a separate version has to be generated for each jdk -->
++		<get verbose="true" src="https://ibm.box.com/shared/static/odnmhqua5sdue03z43vqsv0lp509ov70.jks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_openjdk.jks" />
++		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_ibmjdk.jks" />
++
++		<antcall target="select-atlas-config-file"/>
++		<echo message="Atlas SSL has been enabled." />
++		<!-- On windows, Atlas needs to be restarted in order for the kafka queues to come up properly -->
++		<antcall target="restart-atlas-on-windows" />
++	</target>
++
++	<!-- ****************************************************************************************** -->
++	<target name="print-info" if="use.running.atlas">
++		<echo message="Don't start/stop Atlas because use.running.atlas is set" />
++	</target>
++
++	<target name="clean-atlas" depends="print-info" unless="use.running.atlas">
++		<echo message="Cleaning Atlas" />
++		<antcall target="remove-atlas-dir"/>
++		<antcall target="reset-derby-data"/>
++	</target>
++
++	<target name="ensure-atlas-running" depends="print-info" unless="use.running.atlas">
++		<echo message="Ensure that Atlas is running" />
++		<antcall target="prepare-atlas" />
++		<antcall target="start-atlas"/>
++		<antcall target="check-atlas-url"/>
++		<echo message="Atlas is running" />
++	</target>
++
++</project>
+diff --git a/odf/odf-atlas/pom.xml b/odf/odf-atlas/pom.xml
+new file mode 100755
+index 0000000..cc714e6
+--- /dev/null
++++ b/odf/odf-atlas/pom.xml
+@@ -0,0 +1,216 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns:if="ant:if">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-atlas</artifactId>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-store</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-spark</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-spark</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.derby</groupId>
++			<artifactId>derby</artifactId>
++			<version>10.12.1.1</version>
++			<scope>test</scope>
++		</dependency>
++	</dependencies>
++
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-failsafe-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++						<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
++						<atlas.url>${atlas.url}</atlas.url>
++						<atlas.user>${atlas.user}</atlas.user>
++						<atlas.password>${atlas.password}</atlas.password>
++					</systemPropertyVariables>
++					<dependenciesToScan>
++						<dependency>org.apache.atlas.odf:odf-core</dependency>
++					</dependenciesToScan>
++					<includes>
++						<include>**/integrationtest/**/**.java</include>
++					</includes>
++				</configuration>
++				<executions>
++					<execution>
++						<id>integration-test</id>
++						<goals>
++							<goal>integration-test</goal>
++						</goals>
++					</execution>
++					<execution>
++						<id>verify</id>
++						<goals>
++							<goal>verify</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++						<odf.build.project.name>${project.name}</odf.build.project.name>
++						<atlas.url>${atlas.url}</atlas.url>
++						<atlas.user>${atlas.user}</atlas.user>
++						<atlas.password>${atlas.password}</atlas.password>
++					</systemPropertyVariables>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-antrun-plugin</artifactId>
++				<version>1.8</version>
++				<executions>
++					<execution>
++						<inherited>false</inherited>
++						<id>clean-atlas</id>
++						<phase>clean</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target>
++								<property name="atlas-unpack-dir" value="/tmp"/>
++								<property name="atlas.version" value="${atlas.version}"/>
++								<ant antfile="build_atlas.xml" target="clean-atlas"/>
++							</target>
++						</configuration>
++					</execution>
++					<execution>
++						<id>ensure-atlas-running</id>
++						<phase>process-test-classes</phase>
++						<!-- <phase>pre-integration-test</phase> -->
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target unless="skipTests">
++								<property name="atlas-unpack-dir" value="/tmp" />
++								<property name="atlas.version" value="${atlas.version}" />
++								<ant antfile="build_atlas.xml" target="ensure-atlas-running"></ant>
++							</target>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++
++	<profiles>
++		<profile>
++			<id>atlas</id>
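++			<!-- This profile can be activated with, e.g., "mvn verify -Patlas" (illustrative invocation). -->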
++			<build>
++				<plugins>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-antrun-plugin</artifactId>
++						<version>1.8</version>
++						<executions>
++							<!-- Start Atlas even when the tests are skipped, in order to have it available for the test environment -->
++							<execution>
++								<id>ensure-atlas-running</id>
++								<phase>process-test-classes</phase>
++								<!-- <phase>pre-integration-test</phase> -->
++								<goals>
++									<goal>run</goal>
++								</goals>
++								<configuration>
++									<target unless="skipTests">
++										<property name="atlas-unpack-dir" value="/tmp" />
++										<property name="atlas.version" value="${atlas.version}" />
++										<ant antfile="build_atlas.xml" target="ensure-atlas-running"></ant>
++									</target>
++								</configuration>
++							</execution>
++						</executions>
++					</plugin>
++				</plugins>
++			</build>
++		</profile>
++	</profiles>
++
++</project>
+diff --git a/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java
+new file mode 100755
+index 0000000..04a1bc3
+--- /dev/null
++++ b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java
+@@ -0,0 +1,842 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata.atlas;
++
++import java.io.ByteArrayInputStream;
++import java.io.IOException;
++import java.io.InputStream;
++import java.net.URI;
++import java.net.URISyntaxException;
++import java.security.GeneralSecurityException;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.LinkedHashMap;
++import java.util.List;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.connectivity.RESTClientManager;
++import org.apache.atlas.odf.api.metadata.*;
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.metadata.SampleDataHelper;
++import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
++import org.apache.atlas.odf.core.metadata.WritableMetadataStoreBase;
++import org.apache.atlas.odf.core.metadata.WritableMetadataStoreUtils;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.StatusLine;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.utils.URIBuilder;
++import org.apache.http.entity.ContentType;
++import org.apache.wink.json4j.JSONArray;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
++import org.apache.atlas.odf.api.metadata.AtlasMetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.RESTMetadataStoreHelper;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import com.google.common.collect.Lists;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
++
++// TODO properly escape all URLs when constructed as string concatenation
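++// (A possible approach, sketched: build these URLs with org.apache.http.client.utils.URIBuilder,
++// e.g. new URIBuilder(url + ATLAS_API_INFIX + "/entities").addParameter("guid", id), as the
++// search and delete code below already does, instead of raw string concatenation.)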
++
++/**
++ *
++ * A MetadataStore implementation for accessing metadata stored in an atlas instance
++ *
++ */
++public class AtlasMetadataStore extends WritableMetadataStoreBase implements MetadataStore, WritableMetadataStore {
++	private Logger logger = Logger.getLogger(AtlasMetadataStore.class.getName());
++
++	private static HashMap<String, StoredMetaDataObject> objectStore; // Not actually used but required to meet needs of InternalMetadataStoreBase
++	protected LinkedHashMap<String, StoredMetaDataObject> stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
++	protected static Object accessLock = new Object();
++
++	private String url;
++
++	private String storeId;
++
++	private RESTClientManager restClient;
++
++	private AtlasModelBridge modelBridge;
++
++	static String ATLAS_API_INFIX = "/api/atlas";
++
++	private void constructThis(String url, String user, String password) throws URISyntaxException {
++		this.url = url;
++		this.storeId = "atlas:" + url;
++		this.restClient = new RESTClientManager(new URI(url), user, password);
++		this.modelBridge = new AtlasModelBridge(this);
++	}
++
++	public AtlasMetadataStore() throws URISyntaxException {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		String atlasURL = env.getProperty("atlas.url");
++		String atlasUser = env.getProperty("atlas.user");
++		String atlasPassword = env.getProperty("atlas.password");
++		if ((atlasURL == null) || atlasURL.isEmpty() || (atlasUser == null) || atlasUser.isEmpty() || (atlasPassword == null) || atlasPassword.isEmpty())  {
++			throw new RuntimeException("The system variables \"atlas.url\", \"atlas.user\", and \"atlas.password\" must be set.");
++		}
++		constructThis(atlasURL, atlasUser, Encryption.decryptText(atlasPassword));
++	}
++
++	protected Object getAccessLock() {
++		return accessLock;
++	}
++
++	// Not actually used but required to meet needs of InternalMetadataStoreBase
++	protected HashMap<String, StoredMetaDataObject> getObjects() {
++		return objectStore;
++	}
++
++	protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects() {
++		return stagedObjects;
++	}
++
++	public static final int TIMEOUT = 2000;
++
++	static Object ensureTypesLock = new Object();
++
++	public void ensureODFTypesExist() {
++		synchronized (ensureTypesLock) {
++			try {
++				String typesTestURI = this.url + ATLAS_API_INFIX + "/types/MetaDataObject";
++				Executor executor = this.restClient.getAuthenticatedExecutor();
++				HttpResponse httpResponse = executor.execute(Request.Get(typesTestURI)).returnResponse();
++
++				StatusLine statusLine = httpResponse.getStatusLine();
++				int statusCode = statusLine.getStatusCode();
++				if (statusCode == HttpStatus.SC_OK) {
++					return;
++				}
++				if (statusCode != HttpStatus.SC_NOT_FOUND) {
++					throw new MetadataStoreException("An error occurred when checking for Atlas types. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
++				}
++				// now create types
++				InputStream is = this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json");
++				Request createTypesRequest = Request.Post(this.url + ATLAS_API_INFIX + "/types");
++				createTypesRequest.bodyStream(is, ContentType.APPLICATION_JSON);
++				httpResponse = executor.execute(createTypesRequest).returnResponse();
++				statusLine = httpResponse.getStatusLine();
++				statusCode = statusLine.getStatusCode();
++				if (statusCode != HttpStatus.SC_CREATED) {
++					throw new MetadataStoreException("An error occurred while creating ODF types in Atlas. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
++				}
++			} catch (GeneralSecurityException | IOException e) {
++				logger.log(Level.FINE, "An unexpected exception occurred while connecting to Atlas", e);
++				throw new MetadataStoreException(e);
++			}
++		}
++
++	}
++
++	private void checkConnectivity() {
++		ensureODFTypesExist();
++	}
++
++	/* Filter out all types that already exist.
++	 * This is necessary because trying to create a type multiple times
++	 * leads to a 503 error after Atlas is restarted, with an error saying
++	 * "Type extends super type multiple times".
++	 *
++	 * Returns true if some filtering took place.
++	 *
++	 * Note: Trying to remove the super types from the request doesn't work either.
++	 */
++	boolean filterExistingTypes(JSONObject atlasTypeDefinitions, String typeProperty) throws GeneralSecurityException, IOException {
++		boolean filterWasApplied = false;
++		JSONArray types = (JSONArray) atlasTypeDefinitions.opt(typeProperty);
++		JSONArray newTypes = new JSONArray();
++		for (Object typeObj : types) {
++			JSONObject type = (JSONObject) typeObj;
++
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			String typeName = (String) type.opt("typeName");
++			if (typeName != null) {
++				Request checkTypeRequest = Request.Get(this.url + ATLAS_API_INFIX + "/types/" + typeName);
++				HttpResponse httpResponse = executor.execute(checkTypeRequest).returnResponse();
++				StatusLine statusLine = httpResponse.getStatusLine();
++				int statusCode = statusLine.getStatusCode();
++				if (statusCode != HttpStatus.SC_NOT_FOUND) {
++					// type already exists, don't create it
++					filterWasApplied = true;
++					logger.log(Level.FINE, "Atlas type ''{0}'' already exists, don't create it again", typeName);
++				} else {
++					newTypes.add(type);
++				}
++			}
++		}
++
++		try {
++			atlasTypeDefinitions.put(typeProperty, newTypes);
++		} catch (JSONException e) {
++			throw new RuntimeException(e); // should never happen as only proper JSONObjects are used
++		}
++		return filterWasApplied;
++	}
++
++	boolean isInvalidTypeRequest(JSONObject atlasTypeDefinition) {
++		return ((JSONArray) atlasTypeDefinition.opt("structTypes")).isEmpty() //
++				&& ((JSONArray) atlasTypeDefinition.opt("enumTypes")).isEmpty() //
++				&& ((JSONArray) atlasTypeDefinition.opt("classTypes")).isEmpty() //
++				&& ((JSONArray) atlasTypeDefinition.opt("traitTypes")).isEmpty();
++	}
++
++	void checkUpdateForKnownType(JSONObject atlasTypeDefinition) {
++		JSONArray types = (JSONArray) atlasTypeDefinition.opt("classTypes");
++		for (Object o : types) {
++			JSONObject type = (JSONObject) o;
++			String typeName = (String) type.opt("typeName");
++			if ("ODFAnnotation".equals(typeName)) {
++				String msg = MessageFormat.format("Update of type ''{0}'' is not allowed", typeName);
++				throw new MetadataStoreException(msg);
++			}
++		}
++	}
++
++	public boolean createType(JSONObject atlasTypeDefinition) {
++		try {
++			logger.log(Level.FINE, "Creating types with definition: {0}", atlasTypeDefinition.write());
++			checkConnectivity();
++			boolean filterWasApplied = this.filterExistingTypes(atlasTypeDefinition, "classTypes");
++			filterWasApplied |= this.filterExistingTypes(atlasTypeDefinition, "structTypes");
++			String typesDef = atlasTypeDefinition.write();
++			if (filterWasApplied) {
++				logger.log(Level.FINE, "Modified type definitions after filtering existing types: {0}", typesDef);
++			}
++			if (isInvalidTypeRequest(atlasTypeDefinition)) {
++				logger.log(Level.FINE, "No types left to be created after filtering, skipping");
++				return false;
++			}
++			Executor executor = this.restClient.getAuthenticatedExecutor();
++			Request createTypesRequest = Request.Put(this.url + ATLAS_API_INFIX + "/types");
++			createTypesRequest.bodyStream(new ByteArrayInputStream(typesDef.getBytes("UTF-8")), ContentType.APPLICATION_JSON);
++			HttpResponse httpResponse = executor.execute(createTypesRequest).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int statusCode = statusLine.getStatusCode();
++			if (statusCode != HttpStatus.SC_OK) {
++				throw new MetadataStoreException("An error occurred while creating ODF types in Atlas. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
++			}
++			logger.log(Level.FINE, "Types created. Original request: {0}", typesDef);
++		} catch (GeneralSecurityException | IOException | JSONException e) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", e);
++			throw new MetadataStoreException(e);
++		}
++		return true;
++
++	}
++
++	public JSONObject getAtlasTypeDefinition(String typeName) {
++		try {
++			checkConnectivity();
++			HttpResponse httpResponse = this.restClient.getAuthenticatedExecutor().execute(Request.Get(this.url + ATLAS_API_INFIX + "/types/" + typeName)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int statusCode = statusLine.getStatusCode();
++			if (statusCode == HttpStatus.SC_OK) {
++				InputStream is = httpResponse.getEntity().getContent();
++				JSONObject typeResp = new JSONObject(is);
++				is.close();
++				return typeResp;
++			}
++			return null;
++		} catch (GeneralSecurityException | IOException | JSONException e) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", e);
++			throw new MetadataStoreException(e);
++		}
++	}
++
++    @Override
++    public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
++   		return WritableMetadataStoreUtils.getConnectionInfo(this, informationAsset);
++    }
++
++	@Override
++	public MetaDataObject retrieve(MetaDataObjectReference reference) {
++		checkConnectivity();
++		synchronized (updateLock) {
++			return this.retrieve(reference, 0);
++		}
++	}
++
++	MetaDataObject retrieve(MetaDataObjectReference reference, int level) {
++		JSONObject objectJson = retrieveAtlasEntityJson(reference);
++		if (objectJson == null) {
++			return null;
++		}
++		try {
++			MetaDataObject mdo = this.modelBridge.createMetaDataObjectFromAtlasEntity(objectJson, level);
++			return mdo;
++		} catch (JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	JSONObject retrieveAtlasEntityJson(MetaDataObjectReference reference) {
++		modelBridge.checkReference(reference);
++		String id = reference.getId();
++		try {
++			String resource = url + ATLAS_API_INFIX + "/entities/" + id;
++			HttpResponse httpResponse = this.restClient.getAuthenticatedExecutor().execute(Request.Get(resource)).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code == HttpStatus.SC_NOT_FOUND) {
++				return null;
++			}
++			if (code != HttpStatus.SC_OK) {
++				String msg = MessageFormat.format("Retrieval of object ''{0}'' failed: HTTP request status: ''{1}'', {2}",
++						new Object[] { id, statusLine.getStatusCode(), statusLine.getReasonPhrase() });
++				throw new MetadataStoreException(msg);
++			} else {
++				InputStream is = httpResponse.getEntity().getContent();
++				JSONObject jo = new JSONObject(is);
++				is.close();
++				return jo;
++			}
++		} catch (GeneralSecurityException | IOException | JSONException exc) {
++			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", exc);
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	// TODO only helps in the single-server case
++	// this is just a temporary workaround for the fact that Atlas does not update bidirectional
++	// references.
++	// TODO this currently prevents deadlocks from happening; it needs to be reworked for the distributed case
++	static Object updateLock = new Object();
++
++	private MetaDataObjectReference storeJSONObject(JSONObject jsonObject) {
++		logger.log(Level.FINEST, "Storing converted Atlas object: {0}.", JSONUtils.jsonObject4Log(jsonObject));
++		synchronized(updateLock) {
++			try {
++				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++				HttpResponse atlasResponse = restExecutor.execute( //
++						Request.Post(this.url + ATLAS_API_INFIX + "/entities") //
++								.bodyString(jsonObject.write(), ContentType.APPLICATION_JSON) //
++				).returnResponse();
++				InputStream is = atlasResponse.getEntity().getContent();
++				JSONObject atlasResult = new JSONObject(is);
++				is.close();
++				StatusLine line = atlasResponse.getStatusLine();
++				int statusCode = line.getStatusCode();
++				if (statusCode != HttpStatus.SC_CREATED) {
++					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
++					logger.log(Level.WARNING, "Atlas could not create object for request: {0}", jsonObject.write());
++					logger.log(Level.WARNING, "Atlas result for creating object: {0}", atlasResult.write());
++					throw new MetadataStoreException(
++							MessageFormat.format("Atlas REST call failed, return code: {0}, reason: {1}, details: {2}", new Object[] { statusCode, line.getReasonPhrase(), atlasResult.write() }));
++				}
++				logger.log(Level.FINEST, "Atlas response for storing object: {0}", JSONUtils.jsonObject4Log(atlasResult));
++				JSONArray ids = (JSONArray) ((JSONObject) atlasResult.get("entities")).get("created");
++				if (ids.size() != 1) {
++					String msg = "Atlas created more than one (or no) entity. A unique entity is required so that it can be referenced by other objects.";
++					throw new MetadataStoreException(msg);
++				}
++				String newAnnotationId = (String) ids.get(0);
++				MetaDataObjectReference result = new MetaDataObjectReference();
++				result.setRepositoryId(getRepositoryId());
++				result.setId(newAnnotationId);
++				result.setUrl(getURL(newAnnotationId));
++				return result;
++			} catch (JSONException e) {
++				throw new MetadataStoreException(MessageFormat.format("Error converting JSON object ''{0}'' to string", JSONUtils.jsonObject4Log(jsonObject)), e);
++			} catch(IOException | GeneralSecurityException e2) {
++				throw new MetadataStoreException(MessageFormat.format("Error storing object ''{0}'' in Atlas", JSONUtils.jsonObject4Log(jsonObject)), e2);
++			}
++		}
++	}
++
++	private void updateJSONObject(JSONObject jsonObject, String id) {
++		logger.log(Level.FINEST, "Updating converted Atlas object: {0}.", JSONUtils.jsonObject4Log(jsonObject));
++		synchronized(updateLock) {
++			try {
++				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++				HttpResponse atlasResponse = restExecutor.execute( //
++						Request.Post(this.url + ATLAS_API_INFIX + "/entities/" + id) //
++								.bodyString(jsonObject.write(), ContentType.APPLICATION_JSON) //
++				).returnResponse();
++				InputStream is = atlasResponse.getEntity().getContent();
++				JSONObject atlasResult = new JSONObject(is);
++				is.close();
++				StatusLine line = atlasResponse.getStatusLine();
++				int statusCode = line.getStatusCode();
++				if (statusCode != HttpStatus.SC_OK) {
++					logger.log(Level.WARNING, "Atlas could not update object with request: {0}", jsonObject.write());
++					throw new MetadataStoreException(
++							MessageFormat.format("Atlas REST call failed, return code: {0}, reason: {1}, details: {2}", new Object[] { statusCode, line.getReasonPhrase(), atlasResult.write() }));
++				}
++				logger.log(Level.FINEST, "Atlas response for updating object: {0}", JSONUtils.jsonObject4Log(atlasResult));
++			} catch (JSONException e) {
++				throw new MetadataStoreException(MessageFormat.format("Error converting JSON object ''{0}'' to string", JSONUtils.jsonObject4Log(jsonObject)), e);
++			} catch(IOException | GeneralSecurityException e2) {
++				throw new MetadataStoreException(MessageFormat.format("Error storing object ''{0}'' in Atlas", JSONUtils.jsonObject4Log(jsonObject)), e2);
++			}
++		}
++	}
++
++	private MetaDataObjectReference store(Annotation annot) {
++		checkConnectivity();
++		synchronized (updateLock) {
++			try {
++				JSONObject annotationJSON = this.modelBridge.createAtlasEntityJSON(new StoredMetaDataObject(annot), new HashMap<String, String>(), new HashMap<String, MetaDataObjectReference>(), null);
++				MetaDataObjectReference newObjectRef = storeJSONObject(annotationJSON);
++
++				////////////////////////////////////////
++				// set inverse explicitly; remove this once Atlas does it automatically
++
++				// first get full annotated object
++				String annotatedObjectId = AnnotationStoreUtils.getAnnotatedObject(annot).getId();
++				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++				HttpResponse atlasResponse = restExecutor.execute(Request.Get(this.url + ATLAS_API_INFIX + "/entities/" + annotatedObjectId)).returnResponse();
++				StatusLine line = atlasResponse.getStatusLine();
++				int statusCode = line.getStatusCode();
++				if (statusCode != HttpStatus.SC_OK) {
++					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
++					logger.log(Level.WARNING, "Atlas could not retrieve annotated object: {0}", annotatedObjectId);
++					return null;
++				}
++
++				InputStream is = atlasResponse.getEntity().getContent();
++				JSONObject annotatedObject = new JSONObject(is).getJSONObject("definition");
++				is.close();
++				JSONObject annotatedObjectValues = ((JSONObject) annotatedObject.get("values"));
++				JSONArray annotations = (JSONArray) annotatedObjectValues.opt("annotations");
++
++				// add new "annotations" object to list
++				if (annotations == null) {
++					annotations = new JSONArray();
++					annotatedObjectValues.put("annotations", annotations);
++				}
++				JSONObject annotationRef = modelBridge.createAtlasObjectReference(newObjectRef.getId(), "ODFAnnotation");
++				annotations.add(annotationRef);
++
++				// now update
++				atlasResponse = restExecutor.execute(Request.Post(this.url + ATLAS_API_INFIX + "/entities/" + annotatedObjectId).bodyString(annotatedObject.write(), ContentType.APPLICATION_JSON))
++						.returnResponse();
++				line = atlasResponse.getStatusLine();
++				statusCode = line.getStatusCode();
++				if (statusCode != HttpStatus.SC_OK) {
++					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
++					logger.log(Level.WARNING, "Atlas could not update annotated object: {0}", annotatedObjectId);
++					return null;
++				}
++
++				return newObjectRef;
++			} catch (MetadataStoreException e) {
++				throw e;
++			} catch (Exception e) {
++				throw new MetadataStoreException(e);
++			}
++		}
++	}
++
++	private boolean deleteAcyclic(MetaDataObjectReference reference,
++			                      HashSet<MetaDataObjectReference> referencesProcessed) {
++		try {
++			List<Annotation> annotations = this.getAnnotations(reference, null);
++			if (annotations != null) {
++				for (Annotation annotation : annotations) {
++					if (referencesProcessed.contains(annotation.getReference())) {
++						throw new MetadataStoreException("Circular annotation definition found: " + annotation.getReference().getRepositoryId());
++					}
++					referencesProcessed.add(annotation.getReference());
++					deleteAcyclic(annotation.getReference(), referencesProcessed);
++				}
++			}
++			URIBuilder uri = new URIBuilder(url + ATLAS_API_INFIX + "/entities").addParameter("guid", reference.getId());
++			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++			HttpResponse httpResponse = restExecutor.execute(Request.Delete(uri.build())).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				throw new MetadataStoreException("Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
++			}
++			InputStream is = httpResponse.getEntity().getContent();
++			JSONObject jo = new JSONObject(is);
++			is.close();
++			if (jo.containsKey("entities")) {
++				JSONObject entities = jo.getJSONObject("entities");
++				if (entities.containsKey("deleted")) {
++					JSONArray deleted = entities.getJSONArray("deleted");
++					return (deleted.size() == 1 && deleted.getString(0).equals(reference.getId()));
++				}
++			}
++			return false;
++		} catch(Exception exc) {
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	// TODO: Implement 'delete cascade'. Currently this only works for annotations, not for other types of object relationships
++
++	private boolean delete(MetaDataObjectReference reference) {
++		checkConnectivity();
++		return deleteAcyclic(reference, new HashSet<MetaDataObjectReference>());
++	}
++
++	@Override
++	public Properties getProperties() {
++		Properties props = new Properties();
++		props.put(STORE_PROPERTY_DESCRIPTION, MessageFormat.format("An Atlas metadata repository at ''{0}''", url));
++		props.put(STORE_PROPERTY_TYPE, "atlas");
++		props.put(STORE_PROPERTY_ID, this.storeId);
++		props.put("atlas.url", url);
++		return props;
++	}
++
++	/**
++	 * Returns a "human-readable" URL for this object, typically pointing to the Atlas UI.
++	 */
++	public String getURL(String guid) {
++		return url + "/#!/detailPage/" + guid;
++	}
++
++	public String getAtlasUrl() {
++		return this.url;
++	}
++
++	@Override
++	/**
++	 * The search query is passed to the generic search API; the appropriate query type (Gremlin, DSL, or fulltext) is selected under the covers.
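++	 * For example, a query starting with "g.V" (such as the Gremlin queries built in
++	 * {@link #getParent(MetaDataObject)}) is routed to the Gremlin endpoint, while a DSL
++	 * query like {@code from DataSet} goes to the generic search endpoint.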
++	 */
++	public List<MetaDataObjectReference> search(String query) {
++		checkConnectivity();
++		try {
++			URIBuilder uri = null;
++			HttpResponse httpResponse = null;
++			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++			if (query.startsWith("g.V")) {
++				uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search/gremlin").addParameter("query", query);
++				httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
++			} else {
++				uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search").addParameter("query", query);
++				httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
++			}
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				throw new MetadataStoreException("Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
++			}
++			InputStream is = httpResponse.getEntity().getContent();
++			JSONObject jo = new JSONObject(is);
++			is.close();
++			String querytype = (String) jo.get("queryType");
++
++			String repoId = getRepositoryId();
++			List<MetaDataObjectReference> resultMDORs = new ArrayList<>();
++			JSONArray resultList = (JSONArray) jo.get("results");
++			for (Object o : resultList) {
++				JSONObject result = (JSONObject) o;
++				String guid = null;
++				// get GUID differently depending on the query type
++				if ("gremlin".equals(querytype)) {
++					guid = (String) result.get("__guid");
++				} else if ("dsl".equals(querytype)) {
++					guid = (String) ((JSONObject) result.get("$id$")).get("id");
++				} else {
++					guid = (String) result.get("guid");
++				}
++				MetaDataObjectReference ref = new MetaDataObjectReference();
++				ref.setId(guid);
++				ref.setRepositoryId(repoId);
++				ref.setUrl(getURL(guid));
++				resultMDORs.add(ref);
++			}
++			return resultMDORs;
++		} catch (Exception exc) {
++			throw new MetadataStoreException(exc);
++		}
++
++	}
++
++	@Override
++	public String getRepositoryId() {
++		return this.storeId;
++	}
++
++	@Override
++	public MetadataStore.ConnectionStatus testConnection() {
++		return RESTMetadataStoreHelper.testConnectionForStaticURL(restClient, url);
++	}
++
++	// Make sure Atlas objects are deleted in a particular order according to foreign key relationships to prevent objects from becoming orphans
++	private static final String[] deletionSequence = new String[]{"Annotation", "BusinessTerm", "DataStore", "DataFileFolder", "DataSet" };
++
++	@Override
++	public void resetAllData() {
++		logger.info("Resetting all data on the metadata repository");
++		for (String typeToDelete:deletionSequence) {
++			List<MetaDataObjectReference> refs = this.search("from " + typeToDelete);
++			int i = 0;
++			for (MetaDataObjectReference ref : refs) {
++				try {
++					this.delete(ref);
++					i++;
++				} catch(Exception exc) {
++					logger.log(Level.WARNING, MessageFormat.format("Object ''{0}'' could not be deleted", ref.getId()), exc);
++				}
++			}
++			logger.info(i + " objects of type " + typeToDelete + " deleted.");
++		}
++	}
++
++	public Annotation retrieveAnnotation(MetaDataObjectReference annotationRef) {
++		MetaDataObject mdo = this.retrieve(annotationRef);
++		if (mdo instanceof Annotation) {
++			return (Annotation) mdo;
++		}
++		throw new MetadataStoreException(MessageFormat.format("Object with id ''{0}'' is not an annotation", annotationRef.getId()));
++	}
++
++
++	@SuppressWarnings("unchecked")
++	private List<JSONObject> runAnnotationQuery(String query) {
++		try {
++			List<JSONObject> results = new ArrayList<>();
++			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
++			URIBuilder uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search/dsl").addParameter("query",
++					query);
++			HttpResponse httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
++			StatusLine statusLine = httpResponse.getStatusLine();
++			int code = statusLine.getStatusCode();
++			if (code != HttpStatus.SC_OK) {
++				throw new MetadataStoreException(
++						"Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
++			}
++			InputStream is = httpResponse.getEntity().getContent();
++			JSONObject jo = new JSONObject(is);
++			is.close();
++			results.addAll(jo.getJSONArray("results"));
++			return results;
++		} catch (Exception exc) {
++			throw new MetadataStoreException(exc);
++		}
++	}
++
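++	// Joins all non-null clauses into a single DSL where-clause. Sketch of the behavior:
++	//   combineToWhereClause(Arrays.asList("t.analysisRun = 'x'", "t.profiledObject.__guid = 'y'"))
++	//     -> "where t.analysisRun = 'x' and t.profiledObject.__guid = 'y'"
++	//   combineToWhereClause(Arrays.asList(null, null)) -> ""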
++	private String combineToWhereClause(List<String> clauses) {
++		StringBuilder whereClause = null;
++		for (String clause : clauses) {
++			if (clause != null) {
++				if (whereClause == null) {
++					whereClause = new StringBuilder("where ");
++					whereClause.append(clause);
++				} else {
++					whereClause.append(" and ").append(clause);
++				}
++			}
++		}
++		if (whereClause == null) {
++			whereClause = new StringBuilder("");
++		}
++		return whereClause.toString();
++	}
++
++	private List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId) {
++		checkConnectivity();
++
++		String profilingAnnotationObjectClause = null;
++		String classificationAnnotationObjectClause = null;
++		String analysisRequestClause = null;
++		if (object != null) {
++			profilingAnnotationObjectClause = "t.profiledObject.__guid = '" + object.getId() + "'";
++			classificationAnnotationObjectClause = "t.classifiedObject.__guid = '" + object.getId() + "'";
++		}
++		if (analysisRequestId != null) {
++			analysisRequestClause = "t.analysisRun = '" + analysisRequestId + "'";
++		}
++
++		List<JSONObject> queryResults = new ArrayList<>();
++		queryResults.addAll(runAnnotationQuery(
++				"from ProfilingAnnotation as t " + combineToWhereClause(Arrays.asList(new String[]{profilingAnnotationObjectClause, analysisRequestClause})) ));
++		queryResults.addAll(runAnnotationQuery(
++				"from ClassificationAnnotation as t " + combineToWhereClause(Arrays.asList(new String[]{classificationAnnotationObjectClause, analysisRequestClause})) ));
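++		// The resulting queries look like (illustrative):
++		// "from ProfilingAnnotation as t where t.profiledObject.__guid = '<id>' and t.analysisRun = '<requestId>'"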
++		// TODO relationship annotation
++
++		try {
++			List<Annotation> results = new ArrayList<>();
++			for (JSONObject jo : queryResults) {
++				results.add((Annotation) this.modelBridge.createMetaDataObjectFromAtlasSearchResult(jo, 0));
++			}
++			return results;
++		} catch (Exception exc) {
++			exc.printStackTrace();
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	@Override
++	public void createSampleData() {
++		logger.log(Level.INFO, "Creating sample data in metadata store.");
++		SampleDataHelper.copySampleFiles();
++		WritableMetadataStoreUtils.createSampleDataObjects(this);
++	}
++
++	@Override
++	public MetadataQueryBuilder newQueryBuilder() {
++		return new AtlasMetadataQueryBuilder();
++	}
++
++	public static void main(String[] args) {
++		try {
++			System.out.println("Creating Atlas sample data.");
++			AtlasMetadataStore mds = new AtlasMetadataStore();
++			mds.createSampleData();
++		} catch (Exception e) {
++			e.printStackTrace();
++		}
++	}
++
++	@Override
++	public AnnotationPropagator getAnnotationPropagator() {
++		return new AnnotationPropagator() {
++
++			@Override
++			public void propagateAnnotations(AnnotationStore as, String requestId) {
++				if (as instanceof AtlasMetadataStore) {
++					// do nothing, annotations already persisted
++					return;
++				}
++				// if this is another annotation store, simply store the annotations as-is
++				List<Annotation> annotations = as.getAnnotations(null, requestId);
++				for (Annotation annot : annotations) {
++					store(annot);
++				}
++			}
++		};
++	}
++
++	@Override
++	public void commit() {
++		checkConnectivity();
++		HashMap<String, StoredMetaDataObject> objectHashMap = new HashMap<String, StoredMetaDataObject>();
++		HashMap<String, String> typeMap = new HashMap<String, String>();
++		for (StoredMetaDataObject object : stagedObjects.values()) {
++			MetaDataObjectReference objRef = object.getMetaDataObject().getReference();
++			modelBridge.checkReference(objRef);
++			objectHashMap.put(objRef.getId(), object);
++			typeMap.put(objRef.getId(), object.getMetaDataObject().getClass().getSimpleName());
++		}
++
++		// Create a list of all objects, starting with "root objects" that do not have dependencies on the subsequent objects
++		List<StoredMetaDataObject> objectsToCreate = new ArrayList<StoredMetaDataObject>();
++		int numberOfObjectsToCreate;
++		do {
++			List<StoredMetaDataObject> rootObjectList = modelBridge.getRootObjects(objectHashMap);
++			numberOfObjectsToCreate = objectsToCreate.size();
++			objectsToCreate.addAll(rootObjectList);
++			for (StoredMetaDataObject rootObject : rootObjectList) {
++				objectHashMap.remove(rootObject.getMetaDataObject().getReference().getId());
++			}
++		} while((objectHashMap.size() > 0) && (objectsToCreate.size() > numberOfObjectsToCreate));
++
++		// Process object list in reverse order so that dependent objects are created first
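++		// Illustrative example (assuming parents hold the references to their children): for a staged
++		// Database -> Schema -> Table chain, the Database is extracted as a root object first, so
++		// reversing the list creates the Table before the Schema and the Schema before the Database,
++		// letting each parent embed the references of its already-created children.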
++		HashMap<String, MetaDataObjectReference> referenceMap = new HashMap<String, MetaDataObjectReference>();
++		for (StoredMetaDataObject obj : Lists.reverse(objectsToCreate)) {
++			if (retrieve(obj.getMetaDataObject().getReference()) != null) {
++				// Update existing object
++				JSONObject originalAtlasJson = retrieveAtlasEntityJson(obj.getMetaDataObject().getReference());
++				JSONObject newObjectJSON = modelBridge.createAtlasEntityJSON(obj, typeMap, referenceMap, originalAtlasJson);
++				logger.log(Level.INFO, "Updating object of type ''{0}'' in metadata store: ''{1}''", new Object[] { obj.getMetaDataObject().getClass().getName(), newObjectJSON });
++				updateJSONObject(newObjectJSON, obj.getMetaDataObject().getReference().getId());
++			} else {
++				// Create new object
++				JSONObject newObjectJSON = modelBridge.createAtlasEntityJSON(obj, typeMap, referenceMap, null);
++				logger.log(Level.INFO, "Storing new object of type ''{0}'' in metadata store: ''{1}''", new Object[] { obj.getMetaDataObject().getClass().getName(), newObjectJSON });
++				referenceMap.put(obj.getMetaDataObject().getReference().getId(), storeJSONObject(newObjectJSON)); // Store new object id in reference map
++			}
++		}
++	}
++
++	@Override
++	public MetaDataObject getParent(MetaDataObject metaDataObject) {
++		String queryString = "";
++		Class<? extends MetaDataObject> type = MetaDataObject.class;
++		String objectId = metaDataObject.getReference().getId();
++		if (metaDataObject instanceof Column) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__RelationalDataSet.columns\").toList()";
++			type = RelationalDataSet.class;
++		} else if (metaDataObject instanceof Connection) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataStore.connections\").toList()";
++			type = DataStore.class;
++		} else if (metaDataObject instanceof DataFileFolder) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataFileFolder.dataFileFolders\").toList()";
++			type = DataFileFolder.class;
++		} else if (metaDataObject instanceof DataFile) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataFileFolder.dataFiles\").toList()";
++			type = DataFileFolder.class;
++		} else if (metaDataObject instanceof Schema) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__Database.schemas\").toList()";
++			type = Database.class;
++		} else if (metaDataObject instanceof Table) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__Schema.tables\").toList()";
++			type = Schema.class;
++		}
++		List<MetaDataObjectReference> parentList = search(queryString);
++		if (parentList.size() == 1) {
++			return InternalMetaDataUtils.getObjectList(this, parentList, type).get(0);
++		} else if (parentList.size() == 0) {
++			return null;
++		}
++		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more than one parent object.", metaDataObject.getReference().getId());
++		throw new MetadataStoreException(errorMessage);
++	}
++
++	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type) {
++		String queryString = "";
++		String objectId = metaDataObject.getReference().getId();
++		if (MetadataStoreBase.ODF_COLUMNS_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__RelationalDataSet.columns\").toList()";
++		} else if (MetadataStoreBase.ODF_CONNECTIONS_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataStore.connections\").toList()";
++		} else if (MetadataStoreBase.ODF_DATAFILEFOLDERS_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataFileFolder.dataFileFolders\").toList()";
++		} else if (MetadataStoreBase.ODF_DATAFILES_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataFileFolder.dataFiles\").toList()";
++		} else if (MetadataStoreBase.ODF_SCHEMAS_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__Database.schemas\").toList()";
++		} else if (MetadataStoreBase.ODF_TABLES_REFERENCE.equals(attributeName)) {
++			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__Schema.tables\").toList()";
++		}
++		return InternalMetaDataUtils.getObjectList(this, search(queryString), type);
++	}
++
++}
+diff --git a/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java
+new file mode 100755
+index 0000000..d06d8b5
+--- /dev/null
++++ b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java
+@@ -0,0 +1,409 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata.atlas;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.lang.reflect.Field;
++import java.lang.reflect.ParameterizedType;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONArray;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreBase;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++import org.apache.atlas.odf.api.metadata.UnknownMetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++
++/**
++ * This class converts ODF objects to Atlas objects / REST API requests
++ * and vice versa.
++ *
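++ * Illustrative usage (a sketch; variable names are hypothetical):
++ * <pre>
++ *   AtlasModelBridge bridge = new AtlasModelBridge(metadataStore);
++ *   MetaDataObject mdo = bridge.createMetaDataObjectFromAtlasEntity(atlasEntityJson, 0);
++ * </pre>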
++ */
++public class AtlasModelBridge {
++	Logger logger = Logger.getLogger(AtlasModelBridge.class.getName());
++	MetadataStore mds;
++
++    private static final HashMap<String, String> referenceNameMap = new HashMap<String, String>();
++    static {
++        referenceNameMap.put(MetadataStoreBase.ODF_COLUMNS_REFERENCE, "columns");
++        referenceNameMap.put(MetadataStoreBase.ODF_CONNECTIONS_REFERENCE, "connections");
++        referenceNameMap.put(MetadataStoreBase.ODF_DATAFILEFOLDERS_REFERENCE, "dataFileFolders");
++        referenceNameMap.put(MetadataStoreBase.ODF_DATAFILES_REFERENCE, "dataFiles");
++        referenceNameMap.put(MetadataStoreBase.ODF_SCHEMAS_REFERENCE, "schemas");
++        referenceNameMap.put(MetadataStoreBase.ODF_TABLES_REFERENCE, "tables");
++    }
++
++	public AtlasModelBridge(MetadataStore mds) {
++		this.mds = mds;
++	}
++
++	static ODFSettings getODFConfig() {
++		ODFSettings odfconf = new ODFFactory().create().getSettingsManager().getODFSettings();
++		return odfconf;
++	}
++
++	private boolean isAtlasType(Object atlasJson, String className) {
++		if ((atlasJson instanceof JSONObject) && ((JSONObject) atlasJson).containsKey("jsonClass")) {
++			Object jsonClass = ((JSONObject) atlasJson).opt("jsonClass");
++			if (jsonClass instanceof String) {
++				return jsonClass.toString().equals(className);
++			}
++		}
++		return false;
++	}
++
++	private Object convertAtlasJsonToODF(Object atlasJson, int level) throws JSONException {
++		Object resultObj = atlasJson;
++		if (atlasJson instanceof JSONObject) {
++			JSONObject valJson = (JSONObject) atlasJson;
++			if (isAtlasType(valJson, "org.apache.atlas.typesystem.json.InstanceSerialization$_Id")) {
++				// JSON object is reference to other object
++				String id = (String) valJson.get("id");
++				resultObj = createODFReferenceJSON(level, id);
++			} else if ("org.apache.atlas.typesystem.json.InstanceSerialization$_Reference".equals(valJson.opt("jsonClass"))) {
++				// treat References the same as IDs
++				JSONObject idObj = (JSONObject) valJson.get("id");
++				String id = (String) idObj.get("id");
++				resultObj = createODFReferenceJSON(level, id);
++			} else if (valJson.opt("$typeName$") != null && (valJson.opt("id") instanceof String)) {
++				// this only happens if the object was retrieved via the /discovery/search resource and not through /entities
++				resultObj = createODFReferenceJSON(level, valJson.getString("id"));
++			} else {
++				JSONObject convertedJSONObject = new JSONObject();
++				// always remove annotations property as it is no longer part of MetaDataObject
++				valJson.remove("annotations");
++
++				// Remove references to other objects because they are not attributes of the corresponding metadata objects
++				for (String referenceName : referenceNameMap.values()) {
++					valJson.remove(referenceName);
++				}
++
++				for (Object key : valJson.keySet()) {
++					Object value = valJson.get(key);
++					convertedJSONObject.put(key, convertAtlasJsonToODF(value, level + 1));
++				}
++				if (isAtlasType(convertedJSONObject, "org.apache.atlas.typesystem.json.InstanceSerialization$_Struct") && (convertedJSONObject.containsKey("values"))) {
++					// Remove Atlas struct object
++					convertedJSONObject = (JSONObject) convertedJSONObject.get("values");
++				}
++				resultObj = convertedJSONObject;
++			}
++		} else if (atlasJson instanceof JSONArray) {
++			JSONArray arr = (JSONArray) atlasJson;
++			JSONArray convertedArray = new JSONArray();
++			for (Object o : arr) {
++				// don't increase level if traversing an array
++				convertedArray.add(convertAtlasJsonToODF(o, level));
++			}
++			resultObj = convertedArray;
++		}
++		return resultObj;
++	}
++
++	private JSONObject createODFReferenceJSON(int level, String id) throws JSONException {
++		JSONObject mdoref = new JSONObject();
++		mdoref.put("id", id);
++		mdoref.put("repositoryId", this.mds.getRepositoryId());
++		mdoref.put("url", (String) this.mds.getProperties().get("atlas.url"));
++		return mdoref;
++	}
++
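++	// Expected search-result shape (illustrative sketch): {"$typeName$": "Table", "$id$": {"id": "<guid>"}, ...attributes}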
++	public MetaDataObject createMetaDataObjectFromAtlasSearchResult(JSONObject json, int level) throws JSONException {
++		String guid = (String) ((JSONObject) json.get("$id$")).get("id");
++		String typeName = json.getString("$typeName$");
++		json.remove("$id$");
++		json.remove("$typeName$");
++		MetaDataObject mdo = createMDOSkeletonForType(level, json, typeName);
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId(guid);
++		ref.setRepositoryId(this.mds.getRepositoryId());
++		ref.setUrl((String) this.mds.getProperties().get("atlas.url"));
++		mdo.setReference(ref);
++		return mdo;
++	}
++
++	public MetaDataObject createMetaDataObjectFromAtlasEntity(JSONObject json, int level) throws JSONException {
++		String guid = (String) (((JSONObject) ((JSONObject) json.get("definition")).get("id")).get("id"));
++		MetaDataObject mdo = createMDOSkeleton(json, level);
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId(guid);
++		ref.setRepositoryId(this.mds.getRepositoryId());
++		ref.setUrl((String) this.mds.getProperties().get("atlas.url"));
++		mdo.setReference(ref);
++		return mdo;
++	}
++
++	private MetaDataObject createMDOSkeleton(JSONObject json, int level) {
++		try {
++			JSONObject def = (JSONObject) json.get("definition");
++			if (def != null) {
++				JSONObject values = (JSONObject) def.get("values");
++				if (values != null) {
++					String typeName = (String) def.get("typeName");
++					if (typeName != null) {
++						return createMDOSkeletonForType(level, values, typeName);
++					}
++				}
++			}
++		} catch (Exception exc) {
++			// interpret all exceptions as "incorrect format"
++			String msg = "Conversion of JSON to metadata object failed, using default";
++			logger.log(Level.WARNING, msg, exc);
++		}
++		// fallback, create generic MDO
++		return new UnknownMetaDataObject();
++	}
++
++	private MetaDataObject createMDOSkeletonForType(int level, JSONObject values, String typeName)
++			throws JSONException {
++		MetaDataObject result = new UnknownMetaDataObject(); // Unknown by default
++		Class<?> cl;
++		//TODO: Move MetaDataObject.java into models package and use this instead of DataSet
++		String fullClassName = DataSet.class.getPackage().getName() + "." + typeName;
++		try {
++			cl = Class.forName(fullClassName);
++		} catch (ClassNotFoundException e) {
++			String messageText = MessageFormat.format("Cannot find class ''{0}''.", fullClassName);
++			throw new MetadataStoreException(messageText, e);
++		}
++		if (cl != null) {
++			JSONObject modifiedValues = (JSONObject) this.convertAtlasJsonToODF(values, level);
++			if (typeName.equals("ProfilingAnnotation") || typeName.equals("ClassificationAnnotation") || typeName.equals("RelationshipAnnotation")) {
++				result = (MetaDataObject) JSONUtils.fromJSON(modifiedValues.write(), Annotation.class);
++			} else {
++				modifiedValues.put("javaClass", cl.getName());
++				result = (MetaDataObject) JSONUtils.fromJSON(modifiedValues.write(), cl);
++			}
++		}
++		return result;
++	}
++
++	@SuppressWarnings("rawtypes")
++	public JSONObject createAtlasEntityJSON(StoredMetaDataObject storedObject, HashMap<String, String> typeMap, HashMap<String, MetaDataObjectReference> referenceMap, JSONObject originalAtlasJson) {
++		JSONObject objectJson = null;
++		MetaDataObject object = storedObject.getMetaDataObject();
++		try {
++			logger.log(Level.FINE, "Storing instance of " + object.getClass().getName());
++			JSONObject valuesJSON = JSONUtils.toJSONObject(object); // Initialize value JSON with attributes from MetaDataObject
++			valuesJSON.remove("reference"); // Remove object reference because it must not be stored in Atlas
++			Class<?> cl = object.getClass();
++			while (cl != MetaDataObject.class) {  // process class hierarchy up to but excluding MetaDataObject
++				Field fields[] = cl.getDeclaredFields();
++				for (Field f: fields) {
++					f.setAccessible(true);
++					try {
++						Class<?> fieldClass = f.getType();
++						Object fieldObject = f.get(object);
++						if (fieldObject != null) {
++							String fieldName = f.getName();
++							if (fieldClass.getName().equals(List.class.getName())) {
++								// Process reference lists which are stored in attributes of the actual MetaDataObject, e.g. for Annotations
++						        ParameterizedType stringListType = (ParameterizedType) f.getGenericType();
++						        if (!((List) fieldObject).isEmpty()) {
++							        Class<?> listElementClass = (Class<?>) stringListType.getActualTypeArguments()[0];
++							        if (listElementClass.equals(MetaDataObjectReference.class)) {
++										JSONArray referenceArray = new JSONArray();
++										@SuppressWarnings("unchecked")
++										List<MetaDataObjectReference> members = (List<MetaDataObjectReference>) fieldObject;
++										for (MetaDataObjectReference mdor : members) {
++											String referenceId = mdor.getId();
++											if (referenceMap.containsKey(referenceId)) {
++												referenceArray.add(createAnnotatedObjectReference(referenceMap.get(referenceId),typeMap.get(referenceId)));
++											} else {
++												referenceArray.add(createAnnotatedObjectReference(mdor, mds.retrieve(mdor).getClass().getSimpleName()));
++											}
++										}
++										valuesJSON.put(fieldName, referenceArray);
++							        }
++						        }
++							} else if (fieldClass == MetaDataObjectReference.class) {
++								// Process individual references which are stored in attributes of the actual MetaDataObject, e.g. for Annotations
++								String referenceId = ((MetaDataObjectReference) fieldObject).getId();
++								if (referenceMap.containsKey(referenceId)) {
++									valuesJSON.put(fieldName, createAnnotatedObjectReference(referenceMap.get(referenceId), "MetaDataObject"));
++								} else {
++									valuesJSON.put(fieldName, createAnnotatedObjectReference((MetaDataObjectReference) fieldObject, "MetaDataObject"));
++								}
++							} else {
++								valuesJSON.put(fieldName, fieldObject);
++							}
++						}
++					} catch (IllegalAccessException e) {
++						throw new IOException(e);
++					}
++				}
++				cl = cl.getSuperclass();
++			}
++
++			// Store references to other objects which are not attributes of the MetaDataObject
++			for (String referenceType : mds.getReferenceTypes()) {
++				String atlasReferenceName = referenceNameMap.get(referenceType);
++				// Add references of original Atlas object
++				JSONArray referenceArray = new JSONArray();
++				if ((originalAtlasJson != null) && (originalAtlasJson.get("definition") != null)) {
++					JSONObject values = originalAtlasJson.getJSONObject("definition").getJSONObject("values");
++					if ((values != null) && (values.containsKey(atlasReferenceName))) {
++						if (values.get(atlasReferenceName) instanceof JSONArray) {
++							referenceArray = values.getJSONArray(atlasReferenceName);
++						}
++					}
++				}
++				if (storedObject.getReferenceMap().containsKey(referenceType)) {
++					// Add new references for the reference type
++					for (MetaDataObjectReference mdor : storedObject.getReferenceMap().get(referenceType)) {
++						String referenceId = mdor.getId();
++						if (referenceMap.containsKey(referenceId)) {
++							referenceArray.add(createAnnotatedObjectReference(referenceMap.get(referenceId),typeMap.get(referenceId)));
++						} else {
++							referenceArray.add(createAnnotatedObjectReference(mdor, mds.retrieve(mdor).getClass().getSimpleName()));
++						}
++					}
++				}
++				if (referenceArray.size() > 0) {
++					valuesJSON.put(atlasReferenceName, referenceArray);
++				}
++			}
++
++			String objectType;
++			if (object instanceof Annotation) {
++				objectType = (object instanceof ProfilingAnnotation) ? "ProfilingAnnotation" :
++					(object instanceof ClassificationAnnotation) ? "ClassificationAnnotation" :
++					"RelationshipAnnotation";
++			} else {
++				objectType = object.getClass().getSimpleName();
++			}
++			if (originalAtlasJson != null) {
++				// When updating an existing object, the skeleton's id must point to the existing object's id in Atlas
++				objectJson = this.createAtlasEntitySkeleton(objectType, object.getReference().getId());
++			} else {
++				// For new objects, a generic id is used
++				objectJson = this.createAtlasEntitySkeleton(objectType, null);
++			}
++			objectJson.put("values", valuesJSON);
++		} catch (IOException | JSONException exc) {
++			throw new MetadataStoreException(exc);
++		}
++		return objectJson;
++	}
++
++	/**
++	 * Create an empty Atlas object of a certain type with a certain guid.
++	 * Can be used in entity POST requests for creating or partially updating entities.
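++	 * <p>
++	 * For example (illustrative): {@code createAtlasEntitySkeleton("Table", null)} builds a skeleton
++	 * for creating a new Table entity, while passing an existing guid targets that entity for update.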
++	 */
++	private JSONObject createAtlasEntitySkeleton(String typeName, String guid) {
++		try {
++			InputStream is = this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json");
++			JSONObject obj = new JSONObject(is);
++			is.close();
++			obj.put("typeName", typeName);
++			JSONObject id = (JSONObject) obj.get("id");
++			id.put("typeName", typeName);
++			if (guid != null) {
++				id.put("id", guid);
++			}
++			return obj;
++		} catch (IOException | JSONException exc) {
++			throw new MetadataStoreException(exc);
++		}
++	}
++
++	/**
++	 * Checks whether the reference belongs to this repository and throws an exception if not.
++	 */
++	void checkReference(MetaDataObjectReference reference) {
++		if (reference == null) {
++			throw new MetadataStoreException("Reference cannot be null");
++		}
++		if ((reference.getRepositoryId() != null) && !reference.getRepositoryId().equals(mds.getRepositoryId())) {
++			throw new MetadataStoreException(
++					MessageFormat.format("Repository ID ''{0}'' of reference does not match the one of this repository ''{1}''", new Object[] { reference.getRepositoryId(), mds.getRepositoryId() }));
++		}
++	}
++
++	/**
++	 * Creates an Atlas object reference that can be used wherever Atlas expects references in JSON requests.
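++	 * For example (illustrative): {@code createAtlasObjectReference(guid, "Column")} fills the
++	 * reference template with the given guid and type name.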
++	 */
++	public JSONObject createAtlasObjectReference(String guid, String typeName) {
++		JSONObject ref;
++		try {
++			InputStream is = this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json");
++			ref = new JSONObject(is);
++			is.close();
++			ref.put("id", guid);
++			ref.put("typeName", typeName);
++		} catch (IOException | JSONException e) {
++			// should not go wrong
++			throw new RuntimeException(e);
++		}
++		return ref;
++	}
++
++	public JSONObject createAnnotatedObjectReference(MetaDataObjectReference annotatedObjectRef, String typeName) {
++		this.checkReference(annotatedObjectRef);
++		String annotatedObjectId = annotatedObjectRef.getId();
++		return this.createAtlasObjectReference(annotatedObjectId, typeName);
++	}
++
++	public List<StoredMetaDataObject> getRootObjects(HashMap<String, StoredMetaDataObject> objectHashMap) {
++		List<StoredMetaDataObject> rootObjectList = new ArrayList<StoredMetaDataObject>();
++		for (StoredMetaDataObject object : objectHashMap.values()) {
++			if (isRootObject(object, objectHashMap)) {
++				rootObjectList.add(object);
++			}
++		}
++		return rootObjectList;
++	}
++
++	private boolean isRootObject(StoredMetaDataObject object, HashMap<String, StoredMetaDataObject> objectHashMap) {
++		String objectId = object.getMetaDataObject().getReference().getId();
++		try {
++			for (StoredMetaDataObject currentObject : objectHashMap.values()) {
++				String currentObjectId = currentObject.getMetaDataObject().getReference().getId();
++				if (!currentObjectId.equals(objectId)) {
++					// If it is not the object itself, check whether the current object contains a reference to the object
++					if (JSONUtils.toJSON(currentObject).contains(objectId)) {
++						// If it does, it cannot be a root object
++						return false;
++					}
++				}
++			}
++			return true;
++		} catch (JSONException e) {
++			throw new MetadataStoreException(MessageFormat.format("Error converting object of class ''{0}'' to JSON string", object.getClass().getName()), e);
++		}
++	}
++
++}
+diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
+new file mode 100755
+index 0000000..f2630f6
+--- /dev/null
++++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
+@@ -0,0 +1,444 @@
++{
++	"enumTypes": [],
++	"structTypes": [],
++	"traitTypes": [],
++	"classTypes": [
++		{
++			"superTypes": [],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "MetaDataObject",
++			"typeDescription": "The base open metadata object.",
++			"attributeDefinitions": [
++				{
++					"name": "name",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "javaClass",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "description",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "annotations",
++					"dataTypeName": "array<Annotation>",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "originRef",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "replicaRefs",
++					"dataTypeName": "array<string>",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "DataStore",
++			"typeDescription": "The base for all kinds of data stores.",
++			"attributeDefinitions": [
++				{
++					"name": "connections",
++					"dataTypeName": "array<Connection>",
++					"multiplicity": "optional",
++					"isComposite": true,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Connection",
++			"typeDescription": "The base for all kinds of connections.",
++			"attributeDefinitions": []
++		},
++		{
++			"superTypes": ["DataStore"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Database",
++			"typeDescription": "A relational database.",
++			"attributeDefinitions":
++			[
++				{
++					"name": "dbType",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "schemas",
++					"dataTypeName": "array<Schema>",
++					"multiplicity": "optional",
++					"isComposite": true,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		{
++			"superTypes": ["Connection"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "JDBCConnection",
++			"typeDescription": "A JDBC connection.",
++			"attributeDefinitions": [
++				{
++					"name": "jdbcConnectionString",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "user",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "password",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++
++			]
++		},
++		
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "DataFileFolder",
++			"typeDescription": "A folder containing data files or other folders.",
++			"attributeDefinitions": [
++				{
++					"name": "dataFileFolders",
++					"dataTypeName": "array<DataFileFolder>",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "dataFiles",
++					"dataTypeName": "array<DataFile>",
++					"multiplicity": "optional",
++					"isComposite": true,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "DataSet",
++			"typeDescription": "The base for all kinds of data sets (tables, files, etc.).",
++			"attributeDefinitions": [
++			]
++		},
++
++		{
++			"superTypes": ["DataSet"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "RelationalDataSet",
++			"typeDescription": "The base of a relational data set.",
++			"attributeDefinitions": [
++				{
++					"name": "columns",
++					"dataTypeName": "array<Column>",
++					"multiplicity": "optional",
++					"isComposite": true,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Column",
++			"typeDescription": "A relational column.",
++			"attributeDefinitions": [
++				{
++					"name": "dataType",
++					"dataTypeName": "string",
++					"multiplicity": "required",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++
++        {
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Schema",
++			"typeDescription": "The schema of a relational database.",
++			"attributeDefinitions": [
++				{
++					"name": "tables",
++					"dataTypeName": "array<Table>",
++					"multiplicity": "optional",
++					"isComposite": true,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++				
++		{
++			"superTypes": ["RelationalDataSet"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Table",
++			"typeDescription": "A relational table.",
++			"attributeDefinitions": [
++			]
++		},
++
++		{
++			"superTypes": ["RelationalDataSet"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "DataFile",
++			"typeDescription": "A file containing relational data.",
++			"attributeDefinitions": [
++				{
++					"name": "urlString",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		{
++			"superTypes": ["DataSet"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Document",
++			"typeDescription": "An unstructured document.",
++			"attributeDefinitions": [
++				{
++					"name": "urlString",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "encoding",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "Annotation",
++			"typeDescription": "The base for all annotations created through the Open Discovery Framework.",
++			"attributeDefinitions": [
++				{
++					"name": "annotationType",
++					"dataTypeName": "string",
++					"multiplicity": "required",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "analysisRun",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "jsonProperties",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "summary",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		
++		{
++			"superTypes": ["Annotation"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "ProfilingAnnotation",
++			"typeDescription": "The base for all annotations carrying profile attributes of an object.",
++			"attributeDefinitions": [
++				{
++					"name": "profiledObject",
++					"dataTypeName": "MetaDataObject",
++					"multiplicity": "required",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		
++		{
++			"superTypes": ["Annotation"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "ClassificationAnnotation",
++			"typeDescription": "The base for all annotations assigning an object to another object.",
++			"attributeDefinitions": [
++				{
++					"name": "classifiedObject",
++					"dataTypeName": "MetaDataObject",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "classifyingObjects",
++					"dataTypeName": "array<MetaDataObject>",
++					"multiplicity": "collection",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++		
++		{
++			"superTypes": ["Annotation"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "RelationshipAnnotation",
++			"typeDescription": "The base for all annotations expressing a relationship between objects.",
++			"attributeDefinitions": [
++				{
++					"name": "relatedObjects",
++					"dataTypeName": "array<MetaDataObject>",
++					"multiplicity": "collection",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		},
++
++		{
++			"superTypes": ["MetaDataObject"],
++			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
++			"typeName": "BusinessTerm",
++			"typeDescription": "A business term of the glossary.",
++			"attributeDefinitions": [
++				{
++					"name": "abbreviations",
++					"dataTypeName": "array<string>",
++					"multiplicity": "collection",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "example",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				},
++				{
++					"name": "usage",
++					"dataTypeName": "string",
++					"multiplicity": "optional",
++					"isComposite": false,
++					"isUnique": false,
++					"isIndexable": true,
++					"reverseAttributeName": null
++				}
++			]
++		}
++
++	]
++}
+diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json
+new file mode 100755
+index 0000000..ec546e7
+--- /dev/null
++++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json
+@@ -0,0 +1,16 @@
++{
++	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Reference",
++	"id": {
++		"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Id",
++		"id": "-1",
++		"version": 0,
++		"typeName": "ODFAnnotation"
++	},
++	"typeName": "ODFAnnotation",
++	"values": {
++		
++	},
++	"traitNames": [],
++	"traits": {
++	}
++}
+diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json
+new file mode 100755
+index 0000000..99ad73c
+--- /dev/null
++++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json
+@@ -0,0 +1,16 @@
++{
++	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Reference",
++	"id": {
++		"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Id",
++		"id": "-5445763795823115",
++		"version": 0,
++		"typeName": "ODFAnnotation"
++	},
++	"typeName": "ODFAnnotation",
++	"values": {
++		
++	},
++	"traitNames": [],
++	"traits": {
++	}
++}
+diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json
+new file mode 100755
+index 0000000..8514fc6
+--- /dev/null
++++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json
+@@ -0,0 +1,6 @@
++{
++	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Id",
++	"id": "-5445763795823115",
++	"version": 0,
++	"typeName": "TYPE_NAME"
++}
+diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..500aae7
+--- /dev/null
++++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,15 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# Overwrite default implementation
++MetadataStore=org.apache.atlas.odf.core.metadata.atlas.AtlasMetadataStore
+diff --git a/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java b/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java
+new file mode 100755
+index 0000000..1458cb3
+--- /dev/null
++++ b/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java
+@@ -0,0 +1,50 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.runtime;
++
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.apache.atlas.odf.core.test.messaging.MockQueueManager;
++import org.apache.atlas.odf.core.test.spark.MockSparkServiceExecutor;
++import org.apache.atlas.odf.core.test.store.MockConfigurationStorage;
++
++public class ODFFactoryClassesNoMockTest extends ODFTestcase {
++
++	Logger logger = Logger.getLogger(ODFFactoryClassesNoMockTest.class.getName());
++
++	<T> void testFactoryDoesNotCreateInstanceOf(Class<T> interfaceClass, Class<? extends T> mockClass) {
++		ODFInternalFactory f = new ODFInternalFactory();
++		logger.info("Testing mock class for interface: " + interfaceClass.getName());
++		T obj = f.create(interfaceClass);
++		logger.info("Factory created object of type " + obj.getClass().getName());
++		Assert.assertFalse(mockClass.isInstance(obj));
++	}
++
++	@Test
++	public void testNoMockClasses() {
++		logger.info("Testing that no mock classes are used");
++
++		testFactoryDoesNotCreateInstanceOf(ODFConfigurationStorage.class, MockConfigurationStorage.class);
++		testFactoryDoesNotCreateInstanceOf(DiscoveryServiceQueueManager.class, MockQueueManager.class);
++		testFactoryDoesNotCreateInstanceOf(SparkServiceExecutor.class, MockSparkServiceExecutor.class);
++	}
++}
+diff --git a/odf/odf-core/.gitignore b/odf/odf-core/.gitignore
+new file mode 100755
+index 0000000..9d8eebd
+--- /dev/null
++++ b/odf/odf-core/.gitignore
+@@ -0,0 +1,6 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++derby.log
+diff --git a/odf/odf-core/pom.xml b/odf/odf-core/pom.xml
+new file mode 100755
+index 0000000..00f5cdb
+--- /dev/null
++++ b/odf/odf-core/pom.xml
+@@ -0,0 +1,112 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns:if="ant:if">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-core</artifactId>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.wink</groupId>
++			<artifactId>wink-json4j</artifactId>
++			<version>1.4</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.httpcomponents</groupId>
++			<artifactId>fluent-hc</artifactId>
++			<version>4.5.1</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.derby</groupId>
++			<artifactId>derby</artifactId>
++			<version>10.12.1.1</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.jasypt</groupId>
++			<artifactId>jasypt</artifactId>
++			<version>1.9.2</version>
++		</dependency>
++		<dependency>
++			<artifactId>swagger-jaxrs</artifactId>
++			<version>1.5.9</version>
++			<groupId>io.swagger</groupId>
++			<scope>compile</scope>
++		</dependency>
++	</dependencies>
++
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++						<odf.build.project.name>${project.name}</odf.build.project.name>
++					</systemPropertyVariables>
++					<includes>
++						<include>**/configuration/**</include>
++						<!-- All other odf-core unit tests are executed in the odf-messaging project -->
++						<!-- Add individual tests here to run them with the MockQueueManager rather than with Kafka -->
++					</includes>
++					<excludes>
++						<exclude>**/integrationtest/**</exclude>
++					</excludes>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-jar-plugin</artifactId>
++				<version>2.6</version>
++				<executions>
++					<execution>
++						<goals>
++							<goal>test-jar</goal>
++						</goals>
++						<configuration>
++						<!-- remove implementations properties file for test jar -->
++							<excludes>
++								<exclude>org/apache/atlas/odf/odf-implementation.properties</exclude>
++							</excludes>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++
++</project>
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java
+new file mode 100755
+index 0000000..ffd2ba9
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java
+@@ -0,0 +1,67 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import org.jasypt.exceptions.EncryptionOperationNotPossibleException;
++import org.jasypt.util.text.BasicTextEncryptor;
++import org.apache.commons.codec.binary.Base64;
++
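++/**
++ * Helper for encrypting and decrypting text values (e.g. passwords) using jasypt's BasicTextEncryptor.
++ * Illustrative round trip (a sketch):
++ * <pre>
++ *   String secret = Encryption.encryptText("myPassword");
++ *   boolean ok = Encryption.isEncrypted(secret);   // true
++ *   String plain = Encryption.decryptText(secret); // "myPassword"
++ * </pre>
++ */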
++public class Encryption {
++
++	//TODO Store this password at a secure location provided by the surrounding platform.
++	private static final String b64EncryptionPassword = "eGg1NyQyMyUtIXFQbHoxOHNIdkM=";
++
++	public static String encryptText(String plainText) {
++		if ((plainText != null) && (!plainText.isEmpty())) {
++			BasicTextEncryptor textEncryptor = new BasicTextEncryptor();
++			byte[] plainEncryptionPassword = Base64.decodeBase64(b64EncryptionPassword);
++			textEncryptor.setPassword(new String(plainEncryptionPassword));
++			return textEncryptor.encrypt(plainText);
++		} else {
++			return plainText;
++		}
++	}
++
++	public static String decryptText(String encryptedText) {
++		if ((encryptedText != null) && (!encryptedText.isEmpty())) {
++			BasicTextEncryptor textEncryptor = new BasicTextEncryptor();
++			byte[] plainEncryptionPassword = Base64.decodeBase64(b64EncryptionPassword);
++			textEncryptor.setPassword(new String(plainEncryptionPassword));
++			String result = textEncryptor.decrypt(encryptedText);
++			return result;
++		} else {
++			return encryptedText;
++		}
++	}
++	
++	public static boolean isEncrypted(String text) {
++		try {
++			decryptText(text);
++		} catch(EncryptionOperationNotPossibleException exc) {
++			return false;
++		}
++		return true;
++	}
++
++	/*
++	// Uncomment and use the following code for encrypting passwords to be stored in the odf-initial-configuration.json file.
++	public static void main(String[] args) {
++		if (args.length != 1)  {
++			System.out.println("usage: java Encryption <plain password>");
++		} else {
++			System.out.println("Encrypted password: " + encryptText(args[0]));
++		}
++	}
++	 */
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java
+new file mode 100755
+index 0000000..313b25d
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.util.List;
++import java.util.Map;
++
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++
++public interface Environment {
++	
++	String getZookeeperConnectString();
++	
++	String getProperty(String propertyName);
++	
++	Map<String, String> getPropertiesWithPrefix(String prefix);
++	
++	String getCurrentUser();
++	
++	ConfigContainer getDefaultConfiguration();
++	
++	/**
++	 * Returns the names of the runtimes active in this environment.
++	 * Return null to indicate that all available runtimes should be active.
++	 */
++	List<String> getActiveRuntimeNames();
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java
+new file mode 100755
+index 0000000..4021049
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java
+@@ -0,0 +1,95 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.net.URL;
++import java.text.MessageFormat;
++import java.util.Enumeration;
++import java.util.HashMap;
++import java.util.Map;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++public class ODFImplementations {
++
++	Logger logger = Logger.getLogger(ODFImplementations.class.getName());
++
++	private Map<String, String> implementations = new HashMap<String, String>();
++
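++	// Each odf-implementation.properties resource maps interface names to implementation class names,
++	// e.g.: MetadataStore=org.apache.atlas.odf.core.metadata.atlas.AtlasMetadataStore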
++	public ODFImplementations(String path, ClassLoader cl) {
++		Enumeration<URL> resources;
++		try {
++			resources = cl.getResources(path);
++		} catch (IOException exc) {
++			logger.log(Level.WARNING, MessageFormat.format("Properties ''{0}'' could not be loaded", path), exc);
++			return;
++		}
++		while (resources.hasMoreElements()) {
++			URL url = resources.nextElement();
++			try {
++				InputStream is = url.openStream();
++				if (is != null) {
++					Properties props = new Properties();
++					props.load(is);
++					for (Object key : props.keySet()) {
++						String keyString = (String) key;
++						try {
++							if (implementations.containsKey(key)) {
++								String existingClassString = implementations.get(keyString);
++								String newClassString = props.getProperty(keyString);
++								if (!existingClassString.equals(newClassString)) {
++									Class<?> existingClass = cl.loadClass(existingClassString);
++									Class<?> newClass = cl.loadClass(newClassString);
++									String superClass = null;
++									String subClass = null;
++									// select the class lowest in the class hierarchy 
++									if (existingClass.isAssignableFrom(newClass)) {
++										superClass = existingClassString;
++										subClass = newClassString;
++									} else if (newClass.isAssignableFrom(existingClass)) {
++										superClass = newClassString;
++										subClass = existingClassString;
++									}
++									if (superClass != null) {
++										logger.log(Level.INFO, "Implementation for interface ''{0}'' was found more than once, using subclass ''{1}'' (found superclass ''{2}'')",
++												new Object[] { key, subClass, superClass });
++										implementations.put(keyString, subClass);
++									} else {
++										logger.log(Level.WARNING, "Implementation for interface ''{0}'' was found more than once, using ''{1}''. (Conflict between ''{1}'' and ''{2}'')",
++												new Object[] { key, existingClassString, newClassString });
++									}
++								}
++							} else {
++								cl.loadClass(props.getProperty(keyString));
++								implementations.put(keyString, props.getProperty(keyString));
++							}
++						} catch (ClassNotFoundException exc) {
++							logger.log(Level.SEVERE, "Class found in odf-implementation.properties file could not be loaded", exc);
++						}
++					}
++				}
++			} catch (IOException e) {
++				logger.log(Level.WARNING, MessageFormat.format("Properties ''{0}'' could not be loaded", url), e);
++			}
++		}
++	}
++
++	public Map<String, String> getImplementations() {
++		return implementations;
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java
+new file mode 100755
+index 0000000..64e54ad
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java
+@@ -0,0 +1,97 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++
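++/**
++ * Controls the ODF runtime lifecycle. Typical sequence (illustrative):
++ * <pre>
++ *   ODFInitializer.start(); // starts the discovery service queue manager
++ *   // ... run analyses ...
++ *   ODFInitializer.stop();  // stops queues and unmanaged threads, clears the tracker cache
++ * </pre>
++ */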
++public class ODFInitializer {
++
++	static Logger logger = Logger.getLogger(ODFInitializer.class.getName());
++
++	static Object initLock = new Object();
++
++	private static boolean running = false;
++	private static long lastStopTimestamp = 0;
++	private static long lastStartTimestamp = 0;
++	private static boolean startStopInProgress = false;
++	
++
++	public static long getLastStopTimestamp() {
++		synchronized (initLock) {
++			return lastStopTimestamp;
++		}
++	}
++
++	public static long getLastStartTimestamp() {
++		synchronized (initLock) {
++			return lastStartTimestamp;
++		}
++	}
++
++	public static boolean isRunning() {
++		synchronized (initLock) {
++			return running;
++		}
++	}
++	
++	public static boolean isStartStopInProgress() {
++		synchronized (initLock) {
++			return startStopInProgress;
++		}
++	}
++
++	public static void start() {
++		synchronized (initLock) {
++			if (!running) {
++				startStopInProgress = true;
++				DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
++				try {
++					qm.start();
++				} catch (Exception e) {
++					logger.log(Level.WARNING, "An error occurred while starting ODF", e);
++				}
++				lastStartTimestamp = System.currentTimeMillis();
++				running = true;
++				startStopInProgress = false;
++			}
++		}
++	}
++
++	public static void stop() {
++		synchronized (initLock) {
++			if (running) {
++				startStopInProgress = true;
++				ODFInternalFactory f = new ODFInternalFactory();
++				DiscoveryServiceQueueManager qm = f.create(DiscoveryServiceQueueManager.class);
++				try {
++					qm.stop();
++				} catch (TimeoutException e) {
++					logger.log(Level.WARNING, "Timeout occurred while stopping ODF", e);
++				}
++				ThreadManager tm = f.create(ThreadManager.class);
++				tm.shutdownAllUnmanagedThreads();
++				AnalysisRequestTrackerStore arts = f.create(AnalysisRequestTrackerStore.class);
++				arts.clearCache();
++				lastStopTimestamp = System.currentTimeMillis();
++				running = false;
++				startStopInProgress = false;
++			}
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java
+new file mode 100755
+index 0000000..4fd09a7
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java
+@@ -0,0 +1,93 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.util.HashMap;
++import java.util.Map;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.utils.ODFLogConfig;
++
++public class ODFInternalFactory {
++
++	private static Properties defaultImplementations = Utils.readConfigProperties("org/apache/atlas/odf/core/internal/odf-default-implementation.properties");
++	private static ODFImplementations overwrittenImplementations = null;
++	private static Map<Class<?>, Object> singletons = new HashMap<>();
++
++	public static String SINGLETON_MARKER = "@singleton";
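++	// Example (illustrative): a properties entry of the form
++	//   org.example.SomeInterface=org.example.SomeImpl@singleton
++	// makes create(SomeInterface.class) return one shared SomeImpl instance across calls.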
++
++	static {
++		ODFLogConfig.run();
++
++		Logger logger = Logger.getLogger(ODFInternalFactory.class.getName());
++		ClassLoader cl = ODFInternalFactory.class.getClassLoader();
++		String overwriteConfig = "org/apache/atlas/odf/odf-implementation.properties";
++		overwrittenImplementations = new ODFImplementations(overwriteConfig, cl);
++		if (overwrittenImplementations.getImplementations().isEmpty()) {
++			overwrittenImplementations = null;
++		} else {
++			logger.log(Level.INFO, "Found overwritten implementation config: {0}", overwrittenImplementations.getImplementations());
++		}
++		if (overwrittenImplementations == null) {
++			logger.log(Level.INFO, "Default implementations are used");
++		}
++	}
++
++	private Object createObject(Class<?> cl) throws ClassNotFoundException, IllegalAccessException, InstantiationException {
++		String clazz = null;
++		if (overwrittenImplementations != null) {
++			clazz = overwrittenImplementations.getImplementations().get(cl.getName());
++		}
++		if (clazz == null) {
++			clazz = defaultImplementations.getProperty(cl.getName());
++		}
++		if (clazz == null) {
++			// finally try to instantiate the class as such
++			clazz = cl.getName();
++		}
++		boolean isSingleton = false;
++		if (clazz.endsWith(SINGLETON_MARKER)) {
++			clazz = clazz.substring(0, clazz.length() - SINGLETON_MARKER.length());
++			isSingleton = true;
++		}
++		Object o = null;
++		Class<?> implClass = this.getClass().getClassLoader().loadClass(clazz);
++		if (isSingleton) {
++			o = singletons.get(implClass);
++			if (o == null) {
++				o = implClass.newInstance();
++				singletons.put(implClass, o);
++			}
++		} else {
++			o = implClass.newInstance();
++		}
++		return o;
++	}
++
++	@SuppressWarnings("unchecked")
++	public <T> T create(Class<T> cl) {
++		try {
++			return (T) createObject(cl);
++		} catch (ClassNotFoundException e) {
++			throw new RuntimeException(e);
++		} catch (IllegalAccessException e) {
++			throw new RuntimeException(e);
++		} catch (InstantiationException e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java
+new file mode 100755
+index 0000000..623a727
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java
+@@ -0,0 +1,77 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.text.MessageFormat;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++
++public class ODFUtils {
++	public static final int DEFAULT_TIMEOUT_SECS = 10 * 60; // 10 minutes
++
++	public static AnalysisRequestStatus runSynchronously(AnalysisManager analysisManager, AnalysisRequest request) {
++		return runSynchronously(analysisManager, request, DEFAULT_TIMEOUT_SECS); // use the default timeout
++	}
++
++	public static AnalysisRequestStatus runSynchronously(AnalysisManager analysisManager, AnalysisRequest request, int timeoutInSeconds) {
++		Logger logger = Logger.getLogger(ODFUtils.class.getName());
++		AnalysisResponse response = analysisManager.runAnalysis(request);
++		if (response.isInvalidRequest()) {
++			AnalysisRequestStatus status = new AnalysisRequestStatus();
++			status.setState(AnalysisRequestStatus.State.ERROR);
++			status.setDetails(MessageFormat.format("Request was invalid. Details: {0}", response.getDetails()));
++			status.setRequest(request);
++			return status;
++		}
++		AnalysisRequestStatus status = null;
++		long startTime = System.currentTimeMillis();
++		boolean timeOutReached = false;
++		do {
++			logger.fine("Polling for result...");
++			status = analysisManager.getAnalysisRequestStatus(response.getId());
++			try {
++				Thread.sleep(1000);
++			} catch (InterruptedException e) {
++				// restore the interrupt flag instead of swallowing the interruption
++				Thread.currentThread().interrupt();
++			}
++			long currentTime = System.currentTimeMillis();
++			timeOutReached = (currentTime - startTime) > (timeoutInSeconds * 1000L);
++		} while ((AnalysisRequestStatus.State.ACTIVE.equals(status.getState()) || AnalysisRequestStatus.State.QUEUED.equals(status.getState()))
++				&& !timeOutReached);
++		return status;
++
++	}
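++
++	/*
++	 * Example (an illustrative sketch, assuming an initialized ODF instance and a
++	 * populated AnalysisRequest): run a request and block until it completes or
++	 * five minutes have passed.
++	 *
++	 *   AnalysisManager am = new ODFFactory().create().getAnalysisManager();
++	 *   AnalysisRequestStatus status = ODFUtils.runSynchronously(am, request, 300);
++	 *   if (AnalysisRequestStatus.State.FINISHED.equals(status.getState())) {
++	 *       // the analysis finished within the timeout
++	 *   }
++	 */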
++
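++	// Combination examples (illustrative): [FINISHED, NOT_FOUND] -> FINISHED,
++	// [ACTIVE, FINISHED] -> ACTIVE, [ERROR, <anything>] -> ERROR,
++	// [NOT_FOUND, NOT_FOUND] -> NOT_FOUND.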
++	public static AnalysisRequestStatus.State combineStates(List<AnalysisRequestStatus.State> allStates) {
++		// if one of the requests is in error, so is the complete request
++		if (allStates.contains(AnalysisRequestStatus.State.ERROR)) {
++			return AnalysisRequestStatus.State.ERROR;
++		}
++		// if no request could be found -> not found
++		if (Utils.containsOnly(allStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.NOT_FOUND })) {
++			return AnalysisRequestStatus.State.NOT_FOUND;
++		}
++		// if all requests are either not found or finished -> finished
++		if (Utils.containsOnly(allStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.NOT_FOUND, AnalysisRequestStatus.State.FINISHED })) {
++			return AnalysisRequestStatus.State.FINISHED;
++		}
++		// else always return active
++		return AnalysisRequestStatus.State.ACTIVE;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java
+new file mode 100755
+index 0000000..e8361fd
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java
+@@ -0,0 +1,82 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.engine.EngineManager;
++import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class OpenDiscoveryFrameworkImpl implements OpenDiscoveryFramework {
++
++	private Logger logger = Logger.getLogger(OpenDiscoveryFrameworkImpl.class.getName());
++
++	public OpenDiscoveryFrameworkImpl() {
++		if (!ODFInitializer.isRunning() && !ODFInitializer.isStartStopInProgress()) {
++			logger.log(Level.INFO, "Initializing Open Discovery Platform");
++			ODFInitializer.start();
++			getEngineManager().checkHealthStatus(); // This implicitly initializes the control center and the message queues
++			
++			logger.log(Level.INFO, "Open Discovery Platform successfully initialized.");
++			
++			// log active runtimes
++			ServiceRuntimesInfo activeRuntimesInfo = ServiceRuntimes.getRuntimesInfo(ServiceRuntimes.getActiveRuntimes());
++			try {
++				logger.log(Level.INFO, "Active runtimes: ''{0}''", JSONUtils.toJSON(activeRuntimesInfo));
++			} catch (JSONException e) {
++				logger.log(Level.WARNING, "Active runtime info has wrong format", e);
++			}
++		}
++	}
++
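++	/*
++	 * Typical entry point (an illustrative sketch): clients are not expected to
++	 * instantiate this class directly but to go through the public factory:
++	 *
++	 *   OpenDiscoveryFramework odf = new ODFFactory().create();
++	 *   AnalysisManager analysisManager = odf.getAnalysisManager();
++	 */
++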
++	public AnalysisManager getAnalysisManager() {
++		return new ODFInternalFactory().create(AnalysisManager.class);
++	}
++
++	public DiscoveryServiceManager getDiscoveryServiceManager() {
++		return new ODFInternalFactory().create(DiscoveryServiceManager.class);
++	}
++
++	public EngineManager getEngineManager() {
++		return new ODFInternalFactory().create(EngineManager.class);
++	}
++
++	public SettingsManager getSettingsManager() {
++		return new ODFInternalFactory().create(SettingsManager.class);
++	}
++
++	public AnnotationStore getAnnotationStore() {
++		return new ODFInternalFactory().create(AnnotationStore.class);
++	}
++
++	public MetadataStore getMetadataStore() {
++		return new ODFInternalFactory().create(MetadataStore.class);
++	}
++
++	public JDBCMetadataImporter getJDBCMetadataImporter() {
++		return new ODFInternalFactory().create(JDBCMetadataImporter.class);
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java
+new file mode 100755
+index 0000000..e58dd37
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java
+@@ -0,0 +1,71 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++
++public class StandaloneEnvironment implements Environment {
++
++	@Override
++	public String getProperty(String propertyName) {
++		return System.getProperty(propertyName);
++	}
++
++	@Override
++	public String getCurrentUser() {
++		return System.getProperty("user.name");
++	}
++
++	@Override
++	public String getZookeeperConnectString() {
++		return getProperty("odf.zookeeper.connect");
++	}
++
++	@Override
++	public ConfigContainer getDefaultConfiguration() {
++		return Utils.readConfigurationFromClasspath("org/apache/atlas/odf/core/internal/odf-initial-configuration.json");
++	}
++
++	@Override
++	public Map<String, String> getPropertiesWithPrefix(String prefix) {
++		Map<String, String> foundProps = new HashMap<>();
++		Properties props = System.getProperties();
++		for (String key : props.stringPropertyNames()) {
++			if (key.startsWith(prefix)) {
++				foundProps.put(key, props.getProperty(key));
++			}
++		}
++		return foundProps;
++	}
++
++	@Override
++	public List<String> getActiveRuntimeNames() {
++		String p = getProperty("odf.active.runtimes");
++		if (p == null || p.equals("ALL")) {
++			return null;
++		}
++		if (p.equals("NONE")) {
++			return new ArrayList<>();
++		}
++		return Arrays.asList(p.split(","));
++	}
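++
++	// Examples (illustrative; the runtime names are hypothetical):
++	// -Dodf.active.runtimes=ALL or an unset property yields null, i.e. all runtimes
++	// are active; -Dodf.active.runtimes=NONE yields an empty list;
++	// -Dodf.active.runtimes=Java,Spark yields [Java, Spark].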
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java
+new file mode 100755
+index 0000000..060f9fb
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java
+@@ -0,0 +1,314 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core;
++
++import java.io.BufferedReader;
++import java.io.ByteArrayOutputStream;
++import java.io.IOException;
++import java.io.InputStream;
++import java.io.InputStreamReader;
++import java.io.PrintWriter;
++import java.io.StringWriter;
++import java.lang.reflect.InvocationTargetException;
++import java.lang.reflect.Method;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++import java.util.StringTokenizer;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.settings.KafkaConsumerConfig;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++
++public class Utils {
++
++	static Logger logger = Logger.getLogger(Utils.class.getName());
++
++	private static final List<Class<? extends Object>> MERGABLE_CLASSES = Arrays.asList(ConfigContainer.class, KafkaConsumerConfig.class, ODFSettings.class, DiscoveryServiceProperties.class);
++
++	public static void mergeODFPOJOs(Object source, Object update) {
++		if (!source.getClass().isAssignableFrom(update.getClass())) {
++			return;
++		}
++
++		Method[] sourceMethods = source.getClass().getDeclaredMethods();
++
++		for (Method getterMethod : sourceMethods) {
++			if (getterMethod.getName().startsWith("get") || getterMethod.getName().startsWith("is")) {
++				String setterMethodName = getterMethod.getName().replaceFirst("get", "set");
++				if (getterMethod.getName().startsWith("is")) {
++					setterMethodName = setterMethodName.replaceFirst("is", "set");
++				}
++				try {
++					Method setterMethod = source.getClass().getDeclaredMethod(setterMethodName, getterMethod.getReturnType());
++					Object updateValue = getterMethod.invoke(update);
++					if (updateValue != null) {
++						Object sourceValue = getterMethod.invoke(source);
++
++						if (sourceValue != null && MERGABLE_CLASSES.contains(updateValue.getClass())) {
++							//Value is another POJO, must also try merging these instead of overwriting
++							mergeODFPOJOs(sourceValue, updateValue);
++							setterMethod.invoke(source, sourceValue);
++						} else if (sourceValue instanceof Map && updateValue instanceof Map) {
++							Map updateJSON = (Map) updateValue;
++							Map sourceJSON = (Map) sourceValue;
++							for (Object key : updateJSON.keySet()) {
++								sourceJSON.put(key, updateJSON.get(key));
++							}
++							setterMethod.invoke(source, sourceJSON);
++						} else {
++							setterMethod.invoke(source, updateValue);
++						}
++					}
++
++				} catch (NoSuchMethodException e) {
++					throw new RuntimeException(MessageFormat.format("Objects of type {0} and {1} could not be merged, no matching method found for {2}!", source.getClass().getName(), update
++							.getClass().getName(), getterMethod.getName()), e);
++				} catch (SecurityException e) {
++					throw new RuntimeException(MessageFormat.format("Objects of type {0} and {1} could not be merged, method {2} could not be accessed (SecurityException)!", source.getClass()
++							.getName(), update.getClass().getName(), setterMethodName), e);
++				} catch (IllegalAccessException e) {
++					throw new RuntimeException(MessageFormat.format("Objects of type {0} and {1} could not be merged, method {2} could not be accessed (IllegalAccessException)!", source.getClass()
++							.getName(), update.getClass().getName(), getterMethod.getName()), e);
++				} catch (IllegalArgumentException e) {
++					throw new RuntimeException(MessageFormat.format("Objects of type {0} and {1} could not be merged, method {2} does not accept the right parameters!", source.getClass().getName(),
++							update.getClass().getName(), setterMethodName), e);
++				} catch (InvocationTargetException e) {
++					throw new RuntimeException(MessageFormat.format("Objects of type {0} and {1} could not be merged, method {2} or {3} could not be invoked!", source.getClass().getName(), update
++							.getClass().getName(), getterMethod.getName(), setterMethodName), e);
++				}
++
++			}
++		}
++	}
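++
++	/*
++	 * Merge semantics (an illustrative sketch): non-null properties of 'update'
++	 * overwrite the corresponding properties of 'source', while nested mergeable
++	 * POJOs and maps are merged recursively rather than replaced:
++	 *
++	 *   ConfigContainer source = ...; // current configuration
++	 *   ConfigContainer update = new ConfigContainer();
++	 *   update.setOdf(partialSettings); // only the changed settings are populated
++	 *   Utils.mergeODFPOJOs(source, update);
++	 *   // 'source' now carries the updated values, everything else is unchanged
++	 */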
++
++	public static Properties readConfigProperties(String path) {
++		// TODO cache this in static variables, it doesn't change at runtime 
++		InputStream is = Utils.class.getClassLoader().getResourceAsStream(path);
++		if (is == null) {
++			return null;
++		}
++		Properties props = new Properties();
++		try {
++			props.load(is);
++		} catch (IOException e) {
++			throw new RuntimeException(e);
++		}
++		return props;
++	}
++
++	public static void setCurrentTimeAsLastModified(AnalysisRequestTracker tracker) {
++		tracker.setLastModified(System.currentTimeMillis());
++	}
++
++	public static String getExceptionAsString(Throwable exc) {
++		StringWriter sw = new StringWriter();
++		PrintWriter pw = new PrintWriter(sw);
++		exc.printStackTrace(pw);
++		String st = sw.toString();
++		return st;
++	}
++
++	public static String collectionToString(Collection<?> coll, String separator) {
++		// start with the opening bracket so that an empty collection yields "[ ]"
++		// instead of a NullPointerException
++		StringBuilder buf = new StringBuilder("[ ");
++		boolean first = true;
++		for (Object o : coll) {
++			if (!first) {
++				buf.append(separator);
++			}
++			buf.append(o.toString());
++			first = false;
++		}
++		buf.append(" ]");
++		return buf.toString();
++	}
++
++	public static <T> boolean containsOnly(List<T> l, T[] elements) {
++		for (T t : l) {
++			boolean containsOnlyElements = false;
++			for (T el : elements) {
++				if (t.equals(el)) {
++					containsOnlyElements = true;
++					break;
++				}
++			}
++			if (!containsOnlyElements) {
++				return false;
++			}
++		}
++		return true;
++	}
++
++	public static <T> boolean containsNone(List<T> l, T[] elements) {
++		// returns true only if none of the given elements occurs in the list
++		for (T t : l) {
++			for (T el : elements) {
++				if (t.equals(el)) {
++					return false;
++				}
++			}
++		}
++		return true;
++	}
++
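++	// For example (illustrative): containsOnly([A, A], {A}) -> true,
++	// containsOnly([A, B], {A}) -> false, containsNone([B, C], {A}) -> true.
++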
++	public static List<String> splitString(String s, char separator) {
++		List<String> l = new ArrayList<String>();
++		if (s != null) {
++			StringTokenizer tok = new StringTokenizer(s, String.valueOf(separator));
++			while (tok.hasMoreTokens()) {
++				l.add(tok.nextToken());
++			}
++		}
++		return l;
++	}
++
++	public static String getInputStreamAsString(InputStream is, String encoding) {
++		try {
++			// buffer the stream contents instead of re-allocating a byte array per chunk
++			ByteArrayOutputStream bos = new ByteArrayOutputStream();
++			byte[] temp = new byte[2048];
++			int bytesRead;
++			while ((bytesRead = is.read(temp)) != -1) {
++				bos.write(temp, 0, bytesRead);
++			}
++			return bos.toString(encoding);
++		} catch (IOException exc) {
++			return getExceptionAsString(exc);
++		}
++	}
++
++	public static void mergeJSONObjects(JSONObject source, JSONObject target) {
++		if (source != null && target != null) {
++			target.putAll(source);
++		}
++	}
++
++	public static <T> T getValue(T value, T defaultValue) {
++		if (value == null) {
++			return defaultValue;
++		}
++		return value;
++	}
++
++	public static String getSystemPropertyExceptionIfMissing(String propertyName) {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		String value = env.getProperty(propertyName);
++		if (value == null) {
++			String msg = MessageFormat.format("System property ''{0}'' is not set", propertyName);
++			logger.log(Level.SEVERE, msg);
++			throw new RuntimeException(msg);
++		}
++		return value;
++	}
++	
++	public static int getIntEnvironmentProperty(String propertyName, int defaultValue) {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		String value = env.getProperty(propertyName);
++		if (value == null) {
++			return defaultValue;
++		}
++		try {
++			return Integer.parseInt(value);
++		} catch(NumberFormatException exc) {
++			return defaultValue;
++		}
++	}
++
++
++	public static void runSystemCommand(String command) {
++		logger.log(Level.INFO, "Running system command: " + command);
++		try {
++			Process p = Runtime.getRuntime().exec(command);
++			// read the process output before waiting for termination so that a full
++			// output buffer cannot block the child process
++			BufferedReader b = new BufferedReader(new InputStreamReader(p.getInputStream()));
++			String line;
++			while ((line = b.readLine()) != null) {
++				logger.log(Level.INFO, "System command out: " + line);
++			}
++			b.close();
++			p.waitFor();
++		} catch(IOException | InterruptedException e) {
++			logger.log(Level.WARNING, "Error executing system command.", e);
++		}
++	}
++	
++	public static ConfigContainer readConfigurationFromClasspath(String jsonFileInClasspath) {
++		InputStream is = SettingsManager.class.getClassLoader().getResourceAsStream(jsonFileInClasspath);
++		try {
++			JSONObject configJSON = new JSONObject(is);
++			ConfigContainer config = JSONUtils.fromJSON(configJSON.write(), ConfigContainer.class);
++			return config;
++		} catch (Exception exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++
++	public static String joinStrings(List<String> l, char separator) {
++		String result = null;
++		if ((l != null) && !l.isEmpty()) {
++			StringBuilder buf = null;
++			for (String s : l) {
++				if (buf == null) {
++					buf = new StringBuilder();
++				} else {
++					buf.append(separator);
++				}
++				buf.append(s);
++			}
++			result = buf.toString();
++		}
++		return result;
++	}
++	
++	public static String getEnvironmentProperty(String name, String defaultValue) {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		String s = env.getProperty(name);
++		return s != null ? s : defaultValue;		
++	}
++	
++	public static long getEnvironmentProperty(String name, long defaultValue) {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		String s = env.getProperty(name);
++		if (s == null) {
++			return defaultValue;
++		}
++		try {
++			return Long.parseLong(s);
++		} catch(NumberFormatException exc) {
++			String msg = MessageFormat.format("Property ''{0}'' could not be converted to an integer", new Object[]{name});
++			logger.log(Level.WARNING, msg);
++			return defaultValue;
++		}
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java
+new file mode 100755
+index 0000000..8f7fab2
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java
+@@ -0,0 +1,177 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.analysis;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.ODFUtils;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ *
++ * External Java API for creating and managing analysis requests
++ *
++ */
++public class AnalysisManagerImpl implements AnalysisManager {
++
++	public final static char COMPOUND_REQUEST_SEPARATOR = ',';
++	private Logger logger = Logger.getLogger(AnalysisManagerImpl.class.getName());
++	private ControlCenter controlCenter;
++
++	public AnalysisManagerImpl() {
++		controlCenter = new ODFInternalFactory().create(ControlCenter.class);
++	}
++
++	/**
++	 * Issues a new ODF analysis request
++	 *
++	 * @param request Analysis request
++	 * @return Response containing the request id and status information
++	 */
++	public AnalysisResponse runAnalysis(AnalysisRequest request) {
++		if (((request.getDiscoveryServiceSequence() == null) || request.getDiscoveryServiceSequence().isEmpty())
++			&& ((request.getAnnotationTypes() == null) || request.getAnnotationTypes().isEmpty())) {
++			AnalysisResponse response = new AnalysisResponse();
++			response.setId(request.getId());
++			response.setDetails("Either a sequence of discovery service ids or a list of annotation types must be specified to initiate an analysis request.");
++			response.setInvalidRequest(true);
++			return response;
++		}
++
++		if ((request.getDataSets().size() == 1) || request.isProcessDataSetsSequentially()) {
++			logger.log(Level.INFO, "Using sequential request processing (maybe because there is only a single data set)");
++			AnalysisResponse response = controlCenter.startRequest(request);
++			logger.log(Level.INFO, "Request with ID ''{0}'' started on data sets ''{1}''. Complete request: {2}.",
++					new Object[] { response.getId(), request.getDataSets(), JSONUtils.lazyJSONSerializer(request) });
++			return response;
++		}
++
++		List<String> requestIDs = new ArrayList<String>();
++		List<String> detailsMessages = new ArrayList<String>();
++		boolean invalidRequest = true;
++		logger.log(Level.INFO, "Running requests for ''{0}'' data sets in parallel", request.getDataSets().size());
++		logger.log(Level.FINE, "Splitting request into multiple request for each data set. Data Sets: {0}", request.getDataSets());
++		for (MetaDataObjectReference dataSet : request.getDataSets()) {
++			AnalysisRequest partRequest = new AnalysisRequest();
++			partRequest.setDiscoveryServiceSequence(request.getDiscoveryServiceSequence());
++			partRequest.setAdditionalProperties(request.getAdditionalProperties());
++			partRequest.setDataSets(Collections.singletonList(dataSet));
++			AnalysisResponse partResponse = controlCenter.startRequest(partRequest);
++			if (!partResponse.isInvalidRequest()) {
++				String partRequestID = partResponse.getId();
++				requestIDs.add(partRequestID);
++				detailsMessages.add(partResponse.getDetails());
++				// as soon as one request is valid, we make the compound request valid
++				invalidRequest = false;
++			}
++		}
++		AnalysisResponse response = new AnalysisResponse();
++		response.setId(Utils.joinStrings(requestIDs, COMPOUND_REQUEST_SEPARATOR));
++		response.setDetails(Utils.joinStrings(detailsMessages, COMPOUND_REQUEST_SEPARATOR));
++		response.setInvalidRequest(invalidRequest);
++		return response;
++	}
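++
++	/*
++	 * Compound request sketch (illustrative): a parallel request on data sets
++	 * [ds1, ds2, ds3] is split into one request per data set, and the returned id
++	 * is the comma-separated list of the single request ids, e.g. "id1,id2,id3".
++	 * getAnalysisRequestStatus() splits such a compound id again and aggregates
++	 * the individual states via ODFUtils.combineStates().
++	 */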
++
++	/**
++	 * Retrieve status of an ODF analysis request
++	 *
++	 * @param requestId Unique id of the analysis request
++	 * @return Status of the analysis request
++	 */
++	public AnalysisRequestStatus getAnalysisRequestStatus(String requestId) {
++		List<String> singleRequestIds = Utils.splitString(requestId, COMPOUND_REQUEST_SEPARATOR);
++		if (singleRequestIds.size() == 1) {
++			AnalysisRequestStatus status = controlCenter.getRequestStatus(requestId);
++			return status;
++		}
++		AnalysisRequestStatus compoundStatus = new AnalysisRequestStatus();
++		compoundStatus.setState(State.QUEUED);
++		AnalysisRequest compoundRequest = new AnalysisRequest(); // assemble a compound request 
++		compoundRequest.setId(requestId);
++		List<String> allMessages = new ArrayList<String>();
++		List<MetaDataObjectReference> allDataSets = new ArrayList<>();
++		List<State> allStates = new ArrayList<>();
++		for (String singleRequestId : singleRequestIds) {	
++			AnalysisRequestStatus singleStatus = controlCenter.getRequestStatus(singleRequestId);
++			if (compoundRequest.getDiscoveryServiceSequence() == null) {
++				// assume all fields of the single requests are the same
++				// since they were created through runAnalysis()
++				compoundRequest.setDiscoveryServiceSequence(singleStatus.getRequest().getDiscoveryServiceSequence());
++				compoundRequest.setAdditionalProperties(singleStatus.getRequest().getAdditionalProperties());
++			}
++			if (singleStatus.getRequest().getDataSets() != null) {
++				allDataSets.addAll(singleStatus.getRequest().getDataSets());
++			}
++			allStates.add(singleStatus.getState());
++			allMessages.add(singleStatus.getDetails());
++		}
++		compoundRequest.setDataSets(allDataSets);
++
++		compoundStatus.setState(ODFUtils.combineStates(allStates));
++		compoundStatus.setRequest(compoundRequest);
++		compoundStatus.setDetails(Utils.joinStrings(allMessages, COMPOUND_REQUEST_SEPARATOR));
++		return compoundStatus;
++	}
++
++	/**
++	 * Retrieve statistics about all previous ODF analysis requests
++	 *
++	 * @return Request summary
++	 */
++	public AnalysisRequestSummary getAnalysisStats() {
++		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++		return store.getRequestSummary();
++	}
++
++	/**
++	 * Retrieve status details of recent ODF analysis requests
++	 *
++	 * @param offset Starting offset (use 0 to start with the latest request)
++	 * @param limit Maximum number of analysis requests to be returned (use -1 to retrieve all requests)
++	 * @return Status details for each discovery request
++	 */
++	public AnalysisRequestTrackers getAnalysisRequests(int offset, int limit) {
++		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++		AnalysisRequestTrackers analysisRequestTrackers = new AnalysisRequestTrackers();
++		analysisRequestTrackers.setAnalysisRequestTrackers(store.getRecentTrackers(offset, limit));
++		return analysisRequestTrackers;
++	}
++
++	/**
++	 * Request a specific ODF discovery request to be canceled
++	 *
++	 * @param requestId Unique id of the analysis request
++	 * @return Status of the cancellation attempt
++	 */
++	public AnalysisCancelResult cancelAnalysisRequest(String requestId) {
++		return controlCenter.cancelRequest(requestId);
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java
+new file mode 100755
+index 0000000..798b2d3
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java
+@@ -0,0 +1,48 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.annotation;
++
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++
++public class InternalAnnotationStoreUtils {
++
++	public static void storeDiscoveryServiceResult(DiscoveryServiceResult result, AnalysisRequest req) {
++		Logger logger = Logger.getLogger(InternalAnnotationStoreUtils.class.getName());
++		AnnotationStore mds = new ODFFactory().create().getAnnotationStore();
++		mds.setAnalysisRun(req.getId());
++		if (result != null) {
++			logger.log(Level.FINE, "Persisting annotations returned by discovery service");
++			List<Annotation> annotations = result.getAnnotations();
++			if (annotations != null) {
++				for (Annotation annot : annotations) {
++					// only persist if reference was not set
++					if (annot.getReference() == null) {
++						mds.store(annot);
++					} else {
++						logger.log(Level.WARNING, "Returned annotation object has a non-null reference set and will not be persisted (reference: {0})", annot.getReference().toString());
++					}
++				}
++			}
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java
+new file mode 100755
+index 0000000..f779155
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java
+@@ -0,0 +1,68 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.configuration;
++
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++import io.swagger.annotations.ApiModel;
++import io.swagger.annotations.ApiModelProperty;
++
++/**
++ * Serialized JSON structure:
++ * {
++ *  	"odf" : {...},
++ *  	"registeredServices" : [...]
++ * }
++ *
++ * This class is final because reflection is used to access the getters / setters when merging configurations; this does not work with inherited methods.
++ */
++@ApiModel(description="All ODF configuration options.")
++public final class ConfigContainer {
++
++	@ApiModelProperty(value="General ODF configuration options along with details about available discovery services", required=true)
++	private ODFSettings odf;
++
++	@ApiModelProperty(value="Details about available discovery services")
++	private List<DiscoveryServiceProperties> registeredServices = null;
++
++	public List<DiscoveryServiceProperties> getRegisteredServices() {
++		return registeredServices;
++	}
++
++	public void setRegisteredServices(List<DiscoveryServiceProperties> registeredServices) {
++		this.registeredServices = registeredServices;
++	}
++
++	public ODFSettings getOdf() {
++		return odf;
++	}
++
++	public void setOdf(ODFSettings odfSettings) {
++		this.odf = odfSettings;
++	}
++
++	public void validate() throws ValidationException {
++		if (this.odf != null) {
++			odf.validate();
++		}
++		if (this.registeredServices != null) {
++			new ServiceValidator().validate("ODFConfig.registeredServices", this.registeredServices);
++		}
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java
+new file mode 100755
+index 0000000..7ad90e6
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java
+@@ -0,0 +1,235 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.configuration;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.net.URL;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Enumeration;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.settings.SparkConfig;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
++
++public class ConfigManager {
++	private Logger logger = Logger.getLogger(ConfigManager.class.getName());
++	public static final String HIDDEN_PASSWORD_IDENTIFIER = "***hidden***";
++	public static final long CONFIG_UPDATE_SLEEP_BETWEEN_POLLS = 20;
++	public static final int CONFIG_UPDATE_MAX_POLLS = 1500;
++	private static final String DEFAULT_ENCRYPTED_SPARK_CONFIGS = "spark.authenticate.secret,spark.ssl.keyPassword,spark.ssl.keyStorePassword,spark.ssl.trustStorePassword";
++
++	protected ODFConfigurationStorage configurationStore;
++	protected ODFConfigNotificationPublisher notificationManager;
++
++	public ConfigManager() {
++		ODFInternalFactory f = new ODFInternalFactory();
++		this.configurationStore = f.create(ODFConfigurationStorage.class);
++		this.notificationManager = f.create(ODFConfigNotificationPublisher.class);
++	}
++
++	public ConfigContainer getConfigContainer() {
++		ConfigContainer config = configurationStore.getConfig(getDefaultConfigContainer());
++		return config;
++	}
++
++	public ConfigContainer getConfigContainerHidePasswords() {
++		ConfigContainer config = configurationStore.getConfig(getDefaultConfigContainer());
++		hidePasswords(config);
++		return config;
++	}
++
++	public void updateConfigContainer(ConfigContainer update) throws ValidationException {
++		try {
++			update = JSONUtils.cloneJSONObject(update);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++		update.validate();
++		ConfigContainer source = getConfigContainer();
++		unhideAndEncryptPasswords(update, source);
++
++		List<DiscoveryServiceProperties> newServicesToRun = new ArrayList<DiscoveryServiceProperties>();
++		if (update.getRegisteredServices() != null
++				&& source.getRegisteredServices().size() < update.getRegisteredServices().size()) {
++			// store added services if update registers new ones
++			List<DiscoveryServiceProperties> newRegisteredServices = new ArrayList<DiscoveryServiceProperties>();
++			newRegisteredServices.addAll(update.getRegisteredServices());
++			for (DiscoveryServiceProperties oldService : source.getRegisteredServices()) {
++				for (int no = 0; no < newRegisteredServices.size(); no++) {
++					if (newRegisteredServices.get(no).getId().equals(oldService.getId())) {
++						newRegisteredServices.remove(no);
++						break;
++					}
++				}
++			}
++
++			newServicesToRun.addAll(newRegisteredServices);
++		}
++
++		Utils.mergeODFPOJOs(source, update);
++		configurationStore.storeConfig(source);
++
++		if (source.getOdf().getRunNewServicesOnRegistration() && !newServicesToRun.isEmpty()) {
++			runNewServices(newServicesToRun);
++		}
++
++		String changeId = UUID.randomUUID().toString();
++		configurationStore.addPendingConfigChange(changeId);
++		this.notificationManager.publishConfigChange(source, changeId);
++		for (int i=0; i < CONFIG_UPDATE_MAX_POLLS; i++) {
++			if (!configurationStore.isConfigChangePending(changeId)) {
++				logger.log(Level.INFO, MessageFormat.format("Config change id ''{0}'' successfully completed after {1} msec.", new Object[] { changeId, i * CONFIG_UPDATE_SLEEP_BETWEEN_POLLS } ));
++				return;
++			}
++			try {
++				Thread.sleep(CONFIG_UPDATE_SLEEP_BETWEEN_POLLS);
++			} catch (InterruptedException e) {
++				// Ignore interrupt
++				logger.log(Level.WARNING, "Sleep period was interrupted", e);
++			}
++		}
++		logger.log(Level.WARNING, MessageFormat.format("Config change did not complete after {0} msec.", CONFIG_UPDATE_SLEEP_BETWEEN_POLLS * CONFIG_UPDATE_MAX_POLLS));
++	}
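++
++	/*
++	 * Update protocol sketch (illustrative): the merged configuration is stored, a
++	 * random change id is registered as pending, and a CONFIGCHANGE admin message
++	 * is published. The call then polls up to CONFIG_UPDATE_MAX_POLLS times,
++	 * sleeping CONFIG_UPDATE_SLEEP_BETWEEN_POLLS ms between polls (30 seconds in
++	 * total), until the change id is no longer pending.
++	 */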
++
++	public void resetConfigContainer() {
++		logger.warning("resetting ODF configuration!");
++		configurationStore.storeConfig(getDefaultConfigContainer());
++	}
++
++	private static String defaultConfig = null;
++
++	List<DiscoveryServiceProperties> getServicesFoundOnClassPath() throws IOException, JSONException {
++		ClassLoader cl = this.getClass().getClassLoader();
++		Enumeration<URL> services = cl.getResources("META-INF/odf/odf-services.json");
++		List<DiscoveryServiceProperties> result = new ArrayList<>();
++		while (services.hasMoreElements()) {
++			URL url = services.nextElement();
++			InputStream is = url.openStream();
++			String json = Utils.getInputStreamAsString(is, "UTF-8");
++			logger.log(Level.INFO, "Service found on the classpath at {0}: {1}", new Object[] { url, json });
++			result.addAll(JSONUtils.fromJSONList(json, DiscoveryServiceProperties.class));
++		}
++		logger.log(Level.INFO, "Number of classpath services found: {0}", result.size());
++		return result;
++	}
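++
++	/*
++	 * Illustrative descriptor (field names inferred from DiscoveryServiceProperties;
++	 * the actual schema may contain further attributes): a service jar announces
++	 * itself via META-INF/odf/odf-services.json on its classpath, e.g.
++	 *
++	 *   [ { "id": "example-service", "name": "Example service",
++	 *       "description": "An illustrative discovery service" } ]
++	 */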
++
++	private ConfigContainer getDefaultConfigContainer() {
++		if (defaultConfig == null) {			
++			try {
++				ConfigContainer config = new ODFInternalFactory().create(Environment.class).getDefaultConfiguration();
++				// now look for services found on the classpath
++				config.getRegisteredServices().addAll(getServicesFoundOnClassPath());
++				defaultConfig = JSONUtils.toJSON(config);
++			} catch (IOException | JSONException e) {
++				String msg = "Default config could not be loaded or parsed!";
++				logger.severe(msg);
++				throw new RuntimeException(msg, e);
++			}
++		}
++		try {
++			return JSONUtils.fromJSON(defaultConfig, ConfigContainer.class);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	private void runNewServices(List<DiscoveryServiceProperties> newServices) {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		List<String> servicesToRun = new ArrayList<String>();
++		for (DiscoveryServiceProperties info : newServices) {
++			servicesToRun.add(info.getId());
++		}
++
++		AnalysisRequest req = new AnalysisRequest();
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		req.setDiscoveryServiceSequence(servicesToRun);
++		req.setDataSets(mds.search(mds.newQueryBuilder().objectType("DataSet").build()));
++		req.setIgnoreDataSetCheck(true);
++		cc.startRequest(req);
++	}
++
++	private void unhideAndEncryptPasswords(ConfigContainer updatedConfigContainer,
++			ConfigContainer originalConfiguration) {
++		if (updatedConfigContainer.getOdf() != null) {
++			String odfPassword = updatedConfigContainer.getOdf().getOdfPassword();
++			if (odfPassword != null) {
++				if (odfPassword.equals(HIDDEN_PASSWORD_IDENTIFIER)) {
++					// Password was not changed, therefore keep original
++					// encrypted password
++					updatedConfigContainer.getOdf().setOdfPassword(originalConfiguration.getOdf().getOdfPassword());
++				} else if (!Encryption.isEncrypted(odfPassword)) {
++					updatedConfigContainer.getOdf().setOdfPassword(Encryption.encryptText(odfPassword));
++				}
++			}
++			if (updatedConfigContainer.getOdf().getSparkConfig() != null) {
++				SparkConfig updatedSparkConfig = updatedConfigContainer.getOdf().getSparkConfig();
++				if (updatedSparkConfig.getConfigs() != null) {
++					List<String> encryptedSparkConfigs = Arrays.asList(DEFAULT_ENCRYPTED_SPARK_CONFIGS.split(","));
++					for (String configName : updatedSparkConfig.getConfigs().keySet()) {
++						if (encryptedSparkConfigs.contains(configName)) {
++							String updatedConfigValue = (String) updatedSparkConfig.getConfigs().get(configName);
++							if (updatedConfigValue.equals(HIDDEN_PASSWORD_IDENTIFIER)) {
++								// Encrypted value was not changed, therefore keep original
++								// Encrypted value
++								SparkConfig originalSparkConfig = originalConfiguration.getOdf().getSparkConfig();
++								updatedSparkConfig.setConfig(configName, originalSparkConfig.getConfigs().get(configName));
++							} else if (!Encryption.isEncrypted(updatedConfigValue)) {
++								updatedSparkConfig.setConfig(configName, Encryption.encryptText(updatedConfigValue));
++							}
++						}
++					}
++				}
++			}
++		}
++	}
++
++	private void hidePasswords(ConfigContainer configContainer) {
++		if (configContainer.getOdf() != null) {
++			if (configContainer.getOdf().getOdfPassword() != null) {
++				configContainer.getOdf().setOdfPassword(HIDDEN_PASSWORD_IDENTIFIER);
++			}
++			if ((configContainer.getOdf().getSparkConfig() != null)){
++				SparkConfig sparkConfig = configContainer.getOdf().getSparkConfig();
++				if (sparkConfig.getConfigs() != null) {
++					List<String> encryptedSparkConfigs = Arrays.asList(DEFAULT_ENCRYPTED_SPARK_CONFIGS.split(","));
++					for (String configName : sparkConfig.getConfigs().keySet()) {
++						if (((encryptedSparkConfigs.contains(configName)) && (sparkConfig.getConfigs().get(configName)) != null)) {
++							sparkConfig.setConfig(configName, HIDDEN_PASSWORD_IDENTIFIER);
++						}
++					}
++				}
++			}
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java
+new file mode 100755
+index 0000000..a7f822f
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java
+@@ -0,0 +1,45 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.configuration;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ODFConfigNotificationPublisher {
++
++	Logger logger = Logger.getLogger(ODFConfigNotificationPublisher.class.getName());
++
++	public void publishConfigChange(ConfigContainer update, String changeId) {
++		try {
++			logger.log(Level.FINE, "publishing config change: {0}", JSONUtils.toJSON(update));
++			ConfigContainer clone = JSONUtils.fromJSON(JSONUtils.toJSON(update), ConfigContainer.class);
++			AdminMessage amsg = new AdminMessage();
++			amsg.setId(changeId);
++			amsg.setAdminMessageType(Type.CONFIGCHANGE);
++			amsg.setConfigUpdateDetails(clone);
++			amsg.setDetails("Configuration update");
++			DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
++			qm.enqueueInAdminQueue(amsg);
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An unexpected exception occurres when writing to admin queue. Ignoring it", exc);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java
+new file mode 100755
+index 0000000..011d728
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java
+@@ -0,0 +1,75 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.configuration;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.List;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.settings.validation.PropertyValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++
++public class ServiceValidator implements PropertyValidator {
++
++	public void validate(String property, Object value) throws ValidationException {
++		validate(property, value, true);
++	}
++
++	@SuppressWarnings("unchecked")
++	private void validate(String property, Object value, boolean topLevel) throws ValidationException {
++		if (value == null) {
++			throw new ValidationException("Null values are not allowed for this property");
++		}
++
++		if (value instanceof List) {
++			List<DiscoveryServiceProperties> newServices = (List<DiscoveryServiceProperties>) value;
++			List<String> ids = new ArrayList<String>();
++			for (int no = 0; no < newServices.size(); no++) {
++				DiscoveryServiceProperties service = (DiscoveryServiceProperties) newServices.get(no);
++				validate(property, service, false);
++				String serviceId = service.getId();
++				if (ids.contains(serviceId)) {
++					throw new ValidationException(property, MessageFormat.format("you cannot register multiple services with the same id {0}!", serviceId));
++				} else {
++					ids.add(serviceId);
++				}
++			}
++		} else if (value instanceof DiscoveryServiceProperties) {
++			DiscoveryServiceProperties service = (DiscoveryServiceProperties) value;
++			if (service.getId() == null || service.getId().trim().isEmpty() || service.getName() == null || service.getName().trim().isEmpty() || service.getEndpoint() == null) {
++				throw new ValidationException(property, MessageFormat.format("A service requires {0}", "id, name and an endpoint"));
++			}
++
++			if (topLevel) {
++				List<String> regServices = new ArrayList<String>();
++				List<DiscoveryServiceProperties> services = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties();
++				for (DiscoveryServiceProperties regService : services) {
++					regServices.add(regService.getId());
++				}
++
++				if (regServices.contains(service.getId())) {
++					throw new ValidationException(property, MessageFormat.format("a service with id {0} already exists!", service.getId()));
++				}
++			}
++
++			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(service);
++			runtime.validate(service);
++		} else {
++			throw new ValidationException(property, "only DiscoveryServiceRegistrationInfo objects or list of such objects are allowed for this property");
++		}
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java
+new file mode 100755
+index 0000000..fffff6f
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java
+@@ -0,0 +1,60 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++
++// JSON
++public class AdminMessage {
++	public enum Type {
++		SHUTDOWN, RESTART, CONFIGCHANGE
++	}
++
++	private Type adminMessageType;
++	private String details;
++	private ConfigContainer configUpdateDetails;
++	private String messageId;
++
++	public Type getAdminMessageType() {
++		return adminMessageType;
++	}
++
++	public void setAdminMessageType(Type adminMessageType) {
++		this.adminMessageType = adminMessageType;
++	}
++
++	public String getDetails() {
++		return details;
++	}
++
++	public void setDetails(String details) {
++		this.details = details;
++	}
++
++	public ConfigContainer getConfigUpdateDetails() {
++		return configUpdateDetails;
++	}
++
++	public void setConfigUpdateDetails(ConfigContainer configUpdateDetails) {
++		this.configUpdateDetails = configUpdateDetails;
++	}
++
++	public String getId() {
++		return this.messageId;
++	}
++
++	public void setId(String messageId) {
++		this.messageId = messageId;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java
+new file mode 100755
+index 0000000..874e061
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java
+@@ -0,0 +1,92 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.core.ODFInitializer;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class AdminQueueProcessor implements QueueMessageProcessor {
++
++	private Logger logger = Logger.getLogger(AdminQueueProcessor.class.getName());
++
++	@Override
++	public void process(ExecutorService executorService, String msg, int partition, long offset) {
++		AdminMessage adminMessage;
++		try {
++			adminMessage = JSONUtils.fromJSON(msg, AdminMessage.class);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++		switch (adminMessage.getAdminMessageType()) {
++		case SHUTDOWN:
++			initiateShutdown(executorService, false);
++			break;
++		case RESTART:
++			initiateShutdown(executorService, true);
++			break;
++		default:
++			// do nothing
++		}
++	}
++
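++	/*
++	 * Message format sketch (illustrative): messages arrive as JSON-serialized
++	 * AdminMessage instances, e.g.
++	 *
++	 *   { "id": "...", "adminMessageType": "RESTART", "details": "restart requested" }
++	 *
++	 * SHUTDOWN and RESTART are handled here; CONFIGCHANGE messages are
++	 * intentionally ignored by this processor.
++	 */
++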
++	private static final Object restartLockObject = new Object();
++
++	private void initiateShutdown(ExecutorService executorService, final boolean restart) {
++		logger.log(Level.INFO, "Shutdown of ODF was requested...");
++		Runnable shutDownRunnable = new Runnable() {
++
++			@Override
++			public void run() {
++				logger.log(Level.INFO, "Initiating shutdown");
++
++				// sleep some time before initiating the actual shutdown to give process() a chance to return
++				// before it is itself shut down
++				long sleepTimeBeforeShutdown = 1000;
++				try {
++					Thread.sleep(sleepTimeBeforeShutdown);
++				} catch (InterruptedException e) {
++					// restore the interrupt flag and proceed with the shutdown
++					Thread.currentThread().interrupt();
++				}
++
++				synchronized (restartLockObject) {
++					logger.log(Level.INFO, "Shutting down ODF...");
++					try {
++						ODFInitializer.stop();
++						logger.log(Level.INFO, "ODF was shutdown");
++											
++						if (restart) {
++							logger.log(Level.INFO, "Restarting ODF");
++							ODFInitializer.start();
++							logger.log(Level.INFO, "ODF restarted");
++						}
++					}  catch (Exception e) {
++						logger.log(Level.SEVERE, "An unexpected error occurred when shutting down ODF", e);
++					}
++				}
++
++			}
++
++		};
++
++		executorService.submit(shutDownRunnable);
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java
+new file mode 100755
+index 0000000..e43bd45
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java
+@@ -0,0 +1,53 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.List;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++
++
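++/**
++ * Store for analysis request trackers that tracks the lifecycle of analysis requests
++ * (see DefaultStatusQueueStore in this package for the default in-memory implementation).
++ * A minimal usage sketch, assuming the store is obtained through ODFInternalFactory the same
++ * way ControlCenter obtains it:
++ * <pre>{@code
++ * AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++ * AnalysisRequestTracker tracker = store.query("some-odf-request-id"); // null if unknown
++ * List<AnalysisRequestTracker> recent = store.getRecentTrackers(0, 10); // newest first
++ * }</pre>
++ */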
++public interface AnalysisRequestTrackerStore {
++	
++	/**
++	 * Set the status of all old requests that were last modified before the cutOffTimestamp,
++	 * optionally attaching a details message.
++	 */
++	void setStatusOfOldRequest(long cutOffTimestamp, STATUS status, String detailsMessage);
++	
++	// store / update the passed tracker
++	void store(AnalysisRequestTracker tracker);
++	
++	AnalysisRequestTracker query(String analysisRequestId);
++
++	AnalysisRequestTracker findSimilarQueuedRequest(AnalysisRequest request);
++	
++	/**
++	 * @param offset number of most recent trackers to skip, must not be negative
++	 * @param limit maximum number of trackers to retrieve, -1 for all
++	 * @return the most recent trackers, newest first
++	 */
++	List<AnalysisRequestTracker> getRecentTrackers(int offset, int limit);
++	
++	/**
++	 * Clear internal caches, if any.
++	 */
++	void clearCache();
++
++	int getSize();
++
++	AnalysisRequestSummary getRequestSummary();
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java
+new file mode 100755
+index 0000000..8100f18
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java
+@@ -0,0 +1,108 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.core.Utils;
++
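++/**
++ * Wraps an AsyncDiscoveryService so that it can be used wherever a SyncDiscoveryService is expected:
++ * runAnalysis() starts the wrapped service and then polls its status until it reports FINISHED or
++ * ERROR. Polling is controlled by the environment properties odf.async.max.wait.secs (default 600)
++ * and odf.async.poll.interval.ms (default 1000), as read below.
++ */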
++public class AsyncDiscoveryServiceWrapper implements SyncDiscoveryService {
++
++	AsyncDiscoveryService wrappedService = null;
++
++	public AsyncDiscoveryServiceWrapper(AsyncDiscoveryService wrappedService) {
++		this.wrappedService = wrappedService;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		try {
++			DiscoveryServiceAsyncStartResponse asyncResponse = wrappedService.startAnalysis(request);
++			ResponseCode code = asyncResponse.getCode();
++			if (code != ResponseCode.OK) {
++				DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++				response.setCode(code);
++				response.setDetails(asyncResponse.getDetails());
++				return response;
++			}
++			// poll the async service
++			final long maxWaitTimeSecs = Utils.getEnvironmentProperty("odf.async.max.wait.secs", 10 * 60); // default: 10 minutes
++			final long pollingIntervalMS = Utils.getEnvironmentProperty("odf.async.poll.interval.ms", 1000);
++			long maxPolls = (maxWaitTimeSecs * 1000) / pollingIntervalMS;
++			int pollCounter = 0;
++
++			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++			String runId = asyncResponse.getRunId();
++			while (pollCounter < maxPolls) {
++				Thread.sleep(pollingIntervalMS);
++				DiscoveryServiceAsyncRunStatus status = wrappedService.getStatus(runId);
++				switch (status.getState()) {
++				case NOT_FOUND:
++					// should not happen
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Run ID " + runId + " was not found. This should not have happened.");
++					return response;
++				case ERROR:
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails(status.getDetails());
++					return response;
++				case FINISHED:
++					response.setCode(ResponseCode.OK);
++					response.setDetails(status.getDetails());
++					response.setResult(status.getResult());
++					return response;
++				default:
++					// continue polling
++					pollCounter++;
++				}
++			}
++			response.setCode(ResponseCode.UNKNOWN_ERROR);
++			response.setDetails("Polled Async service for " + maxWaitTimeSecs + " seconds without a positive result");
++			return response;
++		} catch (Exception exc) {
++			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++			response.setCode(ResponseCode.UNKNOWN_ERROR);
++			response.setDetails("An unknown error occurred: " + Utils.getExceptionAsString(exc));
++			return response;
++		}
++	}
++
++	public void setExecutorService(ExecutorService executorService) {
++		wrappedService.setExecutorService(executorService);
++	}
++
++	public void setMetadataStore(MetadataStore metadataStore) {
++		wrappedService.setMetadataStore(metadataStore);
++	}
++
++	public void setAnnotationStore(AnnotationStore annotationStore) {
++		wrappedService.setAnnotationStore(annotationStore);
++	}
++
++	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
++		return wrappedService.checkDataSet(dataSetContainer);
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java
+new file mode 100755
+index 0000000..bcd2965
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java
+@@ -0,0 +1,45 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
++import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ConfigChangeQueueProcessor implements QueueMessageProcessor {
++
++	Logger logger = Logger.getLogger(ConfigChangeQueueProcessor.class.getName());
++	
++	@Override
++	public void process(ExecutorService executorService, String msg, int partition, long offset) {
++		try {
++			AdminMessage amsg = JSONUtils.fromJSON(msg, AdminMessage.class);
++			if (Type.CONFIGCHANGE.equals(amsg.getAdminMessageType())) {
++				logger.info("Received config change: " + JSONUtils.toJSON(amsg));
++				ODFInternalFactory f = new ODFInternalFactory();
++				ODFConfigurationStorage configStorage = f.create(ODFConfigurationStorage.class);
++				configStorage.onConfigChange(amsg.getConfigUpdateDetails());
++				configStorage.removePendingConfigChange(amsg.getId());
++			}
++		} catch(Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while processing admin message", exc);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java
+new file mode 100755
+index 0000000..4ffa195
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java
+@@ -0,0 +1,454 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Set;
++import java.util.UUID;
++import java.util.concurrent.Callable;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.api.analysis.*;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.core.Utils;
++
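++/**
++ * Central coordinator of ODF analysis requests: creates trackers for new requests, enqueues them on
++ * the discovery service queues, and advances them from one discovery service to the next.
++ * A minimal sketch of the request lifecycle from a caller's perspective (AnalysisResponse.getId()
++ * is assumed to be the getter matching the setId() calls used below):
++ * <pre>{@code
++ * ControlCenter cc = new ControlCenter();
++ * AnalysisResponse resp = cc.startRequest(request); // request: a populated AnalysisRequest
++ * AnalysisRequestStatus status = cc.getRequestStatus(resp.getId());
++ * }</pre>
++ */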
++public class ControlCenter {
++
++	private static final String CLASSNAME = ControlCenter.class.getName();
++	private Logger logger = Logger.getLogger(CLASSNAME);
++
++	public static final String HEALTH_TEST_DISCOVERY_SERVICE_ID = "odf-health-test-discovery-service-id";
++	public static final String HEALTH_TEST_DATA_SET_ID_PREFIX = "odf-health-test-dummy-data-set-id";
++
++	DiscoveryServiceQueueManager queueManager = null;
++	AnalysisRequestTrackerStore store = null;
++	Environment environment = null;
++	OpenDiscoveryFramework odf;
++
++	public ControlCenter() {
++		ODFInternalFactory f = new ODFInternalFactory();
++		queueManager = f.create(DiscoveryServiceQueueManager.class);
++		store = f.create(AnalysisRequestTrackerStore.class);
++		odf = new ODFFactory().create();
++		environment = f.create(Environment.class);
++	}
++
++	private String createNewRequestId() {
++		return "odf-request-" + UUID.randomUUID().toString() + "_" + System.currentTimeMillis();
++	}
++
++	public DiscoveryServiceQueueManager getQueueManager() {
++		return queueManager;
++	}
++
++	public AnalysisResponse startRequest(AnalysisRequest request) {
++		final String METHODNAME = "startRequest()";
++		logger.entering(CLASSNAME, METHODNAME);
++		AnalysisResponse response = new AnalysisResponse();
++		AnalysisRequest requestWithServiceSequence = null;
++		try {
++			requestWithServiceSequence = JSONUtils.fromJSON(JSONUtils.toJSON(request), AnalysisRequest.class);
++		} catch (JSONException e) {
++			throw new RuntimeException("Error cloning analysis request.", e);
++		}
++		if ((request.getDiscoveryServiceSequence() == null) || request.getDiscoveryServiceSequence().isEmpty()) {
++			DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++			List<String> discoveryServiceSequence = mapper.getRecommendedDiscoveryServiceSequence();
++			if (discoveryServiceSequence == null) {
++				response.setId(request.getId());
++				response.setInvalidRequest(true);
++				response.setDetails("No suitable discovery services found to create the requested annotation types.");
++				return response;
++			}
++			// Log only after the null check so that a null sequence is not dereferenced.
++			logger.log(Level.INFO, "Using discovery service sequence: " + Utils.joinStrings(discoveryServiceSequence, ','));
++			requestWithServiceSequence.setDiscoveryServiceSequence(discoveryServiceSequence);
++		}
++		try {
++			//Initialize queues to make sure analysis can be started
++			queueManager.start();
++		} catch (TimeoutException e) {
++			logger.warning("queues could not be started in time");
++		}
++		AnalysisRequestTracker similarTracker = store.findSimilarQueuedRequest(requestWithServiceSequence);
++		if (similarTracker != null) {
++			logger.log(Level.WARNING, "A request similar to the issued one is already in the queue.");
++			logger.log(Level.FINE, "A request similar to the issued one is already in the queue. Original request: {0}, found similar request: {1}",
++					new Object[] { JSONUtils.lazyJSONSerializer(requestWithServiceSequence),
++					JSONUtils.lazyJSONSerializer(similarTracker) });
++		}
++		String newRequestId = createNewRequestId();
++		response.setId(newRequestId);
++		requestWithServiceSequence.setId(newRequestId);
++		AnalysisRequestTracker tracker = createTracker(requestWithServiceSequence, response);
++		// if request is invalid, response was already modified and null is returned
++		if (tracker != null) {
++			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.IN_DISCOVERY_SERVICE_QUEUE);
++			logger.log(Level.FINE, "Starting new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
++			store.store(tracker);
++			logger.log(Level.FINEST, "Stored tracker for new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
++			queueManager.enqueue(tracker);
++			logger.log(Level.FINEST, "Tracker enqueued for new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
++		}
++		logger.exiting(CLASSNAME, METHODNAME);
++		return response;
++	}
++
++	public AnalysisRequestStatus getRequestStatus(String requestId) {
++		final String METHODNAME = "getRequestStatus(String)";
++		logger.entering(CLASSNAME, METHODNAME);
++		AnalysisRequestStatus result = new AnalysisRequestStatus();
++		AnalysisRequestTracker tracker = store.query(requestId);
++		if (tracker == null) {
++			result.setState(AnalysisRequestStatus.State.NOT_FOUND);
++		} else {
++			AnalysisRequestStatus.State state = null;
++			switch (tracker.getStatus()) {
++			case INITIALIZED:
++			case IN_DISCOVERY_SERVICE_QUEUE:
++				state = AnalysisRequestStatus.State.QUEUED;
++				break;
++			case ERROR:
++				state = AnalysisRequestStatus.State.ERROR;
++				break;
++			case DISCOVERY_SERVICE_RUNNING:
++				state = AnalysisRequestStatus.State.ACTIVE;
++				break;
++			case FINISHED:
++				state = AnalysisRequestStatus.State.FINISHED;
++				break;
++			case CANCELLED:
++				state = AnalysisRequestStatus.State.CANCELLED;
++				break;
++			default:
++				;
++			}
++			result.setState(state);
++			result.setDetails(tracker.getStatusDetails());
++			result.setRequest(tracker.getRequest());
++
++			long totalProcessingTime = 0;
++			long totalQueuingTime = 0;
++			long totalTimeSpentStoringAnnotations = 0;
++
++			List<DiscoveryServiceRequest> requests = new ArrayList<DiscoveryServiceRequest>();
++			for (DiscoveryServiceRequest req : tracker.getDiscoveryServiceRequests()) {
++				DiscoveryServiceRequest copyReq = new DiscoveryServiceRequest();
++				copyReq.setDiscoveryServiceId(req.getDiscoveryServiceId());
++				long putOnQueue = req.getPutOnRequestQueue();
++				long startedProcessing = req.getTakenFromRequestQueue();
++				long finishedProcessing = req.getFinishedProcessing();
++
++				totalProcessingTime += (finishedProcessing > 0 ? finishedProcessing - startedProcessing : finishedProcessing);
++				totalQueuingTime += (startedProcessing > 0 ? startedProcessing - putOnQueue : startedProcessing);
++				totalTimeSpentStoringAnnotations += req.getTimeSpentStoringResults();
++
++				copyReq.setFinishedProcessing(finishedProcessing);
++				copyReq.setPutOnRequestQueue(putOnQueue);
++				copyReq.setTakenFromRequestQueue(startedProcessing);
++				requests.add(copyReq);
++			}
++
++			result.setTotalTimeOnQueues(totalQueuingTime);
++			result.setTotalTimeProcessing(totalProcessingTime);
++			result.setTotalTimeStoringAnnotations(totalTimeSpentStoringAnnotations);
++			result.setServiceRequests(requests);
++		}
++		logger.log(Level.FINE, "Returning request status object {0}", JSONUtils.lazyJSONSerializer(result));
++		logger.exiting(CLASSNAME, METHODNAME);
++		return result;
++	}
++
++	public AnalysisCancelResult cancelRequest(String requestId) {
++		final String METHODNAME = "cancelRequest(String)";
++		logger.entering(CLASSNAME, METHODNAME);
++
++		AnalysisCancelResult result = new AnalysisCancelResult();
++		result.setState(AnalysisCancelResult.State.NOT_FOUND);
++
++		AnalysisRequestTracker request = store.query(requestId);
++		//TODO implement cancellation of running instead of only queued requests.
++		if (request != null) {
++			if (TrackerUtil.isCancellable(request)) {
++				request.setStatus(AnalysisRequestTrackerStatus.STATUS.CANCELLED);
++				store.store(request);
++				logger.info("cancelled request with id " + requestId);
++				result.setState(AnalysisCancelResult.State.SUCCESS);
++			} else {
++				logger.log(Level.FINER, "Request ''{0}'' could not be cancelled. State ''{1}'', next request number: ''{2}''", new Object[]{requestId, request.getStatus(), request.getNextDiscoveryServiceRequest()});
++				result.setState(AnalysisCancelResult.State.INVALID_STATE);
++			}
++		}
++		logger.exiting(CLASSNAME, METHODNAME);
++		return result;
++	}
++
++	private AnalysisRequestTracker createTracker(AnalysisRequest request, AnalysisResponse response) {
++		DiscoveryServiceManager discoveryServiceManager = odf.getDiscoveryServiceManager();
++		List<DiscoveryServiceProperties> registeredServices = new ArrayList<>(discoveryServiceManager.getDiscoveryServicesProperties());
++		registeredServices.add(HealthCheckServiceRuntime.getHealthCheckServiceProperties());
++		String currentUser = this.environment.getCurrentUser();
++
++		/*
++		List<MetaDataObjectReference> datasets = request.getDataSets();
++		
++		if (datasets.size() == 1 && datasets.get(0).getId().startsWith(HEALTH_TEST_DATA_SET_ID_PREFIX)) {
++			// health test mode
++			AnalysisRequestTracker healthTestTracker = new AnalysisRequestTracker();
++			DiscoveryServiceRequest dssr = new DiscoveryServiceRequest();
++			dssr.setOdfRequestId(request.getId());
++			dssr.setDiscoveryServiceId(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
++			String odfUrl = new ODFFactory().create().getSettingsManager().getODFSettings().getOdfUrl();
++			dssr.setOdfUrl(odfUrl);
++			MetaDataObjectReference dsr = datasets.get(0);
++			
++			DataSetContainer dataSetContainer = new DataSetContainer();
++			DataSet oMDataSet = new UnknownDataSet();	
++			oMDataSet.setReference(dsr);
++			dataSetContainer.setDataSet(oMDataSet);
++			
++			dssr.setDataSetContainer(dataSetContainer);
++			dssr.setUser(currentUser);
++			dssr.setAdditionalProperties(request.getAdditionalProperties());
++			healthTestTracker.setDiscoveryServiceRequests(Collections.singletonList(dssr));
++			healthTestTracker.setRequest(request);
++			healthTestTracker.setStatus(STATUS.INITIALIZED);
++			Utils.setCurrentTimeAsLastModified(healthTestTracker);
++			healthTestTracker.setUser(currentUser);
++			response.setDetails("Request is a special health test request.");
++			return healthTestTracker;
++		}
++		*/
++
++		List<DiscoveryServiceRequest> startRequests = new ArrayList<DiscoveryServiceRequest>();
++		List<String> discoveryServiceSequence = request.getDiscoveryServiceSequence();
++		if (discoveryServiceSequence != null && !discoveryServiceSequence.isEmpty()) {
++			logger.log(Level.FINE, "Request issued with fixed discovery service sequence: {0}", discoveryServiceSequence);
++			// first check if discoveryService IDs are valid
++			Set<String> foundDSs = new HashSet<String>(discoveryServiceSequence);
++			for (String ds : discoveryServiceSequence) {
++				for (DiscoveryServiceProperties regInfo : registeredServices) {
++					if (regInfo.getId().equals(ds)) {
++						foundDSs.remove(ds);
++					}
++				}
++			}
++			// if there are some IDs left that were not found 
++			if (!foundDSs.isEmpty()) {
++				String msg = MessageFormat.format("The discovery services {0} could not be found", Utils.collectionToString(foundDSs, ","));
++				logger.log(Level.WARNING, msg);
++				response.setInvalidRequest(true);
++				response.setDetails(msg);
++				return null;
++			}
++
++			// for each data set process all discovery services
++			// (possible alternative, not used here: for all discovery services process each data set)
++			for (MetaDataObjectReference dataSetId : request.getDataSets()) {
++				MetaDataObject mdo = null;
++				if (dataSetId.getId().startsWith(HEALTH_TEST_DATA_SET_ID_PREFIX)) {
++					mdo = new UnknownDataSet();
++					mdo.setReference(dataSetId);
++				} else {
++					mdo = odf.getMetadataStore().retrieve(dataSetId);
++				}
++				if (mdo == null) {
++					String msg = MessageFormat.format("The metadata object id ''{0}'' does not reference an existing metadata object. The request will be set to error.", dataSetId.toString());
++					logger.log(Level.WARNING, msg);
++					response.setInvalidRequest(true);
++					response.setDetails(msg);
++					return null;
++				}
++				if (dataSetId.getUrl() == null) {
++					dataSetId.setUrl(mdo.getReference().getUrl());
++				}
++				for (String ds : discoveryServiceSequence) {
++					DiscoveryServiceRequest req = new DiscoveryServiceRequest();
++					DataSetContainer dataSetContainer = new DataSetContainer();
++					dataSetContainer.setDataSet(mdo);
++					req.setDataSetContainer(dataSetContainer);
++					req.setOdfRequestId(request.getId());
++					req.setDiscoveryServiceId(ds);
++					req.setUser(currentUser);
++					req.setAdditionalProperties(request.getAdditionalProperties());
++					String odfUrl = odf.getSettingsManager().getODFSettings().getOdfUrl();
++					req.setOdfUrl(odfUrl);
++					for (DiscoveryServiceProperties dsri : odf.getDiscoveryServiceManager().getDiscoveryServicesProperties()) {
++						if (dsri.getId().equals(ds)) {
++							if (dsri.getEndpoint().getRuntimeName().equals(SparkServiceRuntime.SPARK_RUNTIME_NAME)) {
++								req.setOdfUser(odf.getSettingsManager().getODFSettings().getOdfUser());
++								//Note that the password has to be provided as plain text here because the remote service cannot decrypt it otherwise.
++								//TODO: Consider to provide a temporary secure token instead of the password.
++								req.setOdfPassword(Encryption.decryptText(odf.getSettingsManager().getODFSettings().getOdfPassword()));
++							}
++						}
++					}
++					startRequests.add(req);
++				}
++			}
++		} else {
++			String msg = "The request didn't contain any processing hints. ODF cannot process a request without an analysis sequence.";
++			logger.log(Level.WARNING, msg);
++			response.setInvalidRequest(true);
++			response.setDetails(msg);
++			return null;
++		}
++
++		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
++		tracker.setDiscoveryServiceRequests(startRequests);
++		tracker.setNextDiscoveryServiceRequest(0);
++		tracker.setRequest(request);
++		tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.INITIALIZED);
++		Utils.setCurrentTimeAsLastModified(tracker);
++		tracker.setUser(currentUser);
++		return tracker;
++	}
++	
++	boolean requiresMetaDataCache(DiscoveryService service) {
++		return service instanceof SparkDiscoveryServiceProxy;
++	}
++
++	public static SyncDiscoveryService getDiscoveryServiceProxy(String discoveryServiceId, AnalysisRequest request) {
++		try {
++			ODFInternalFactory factory = new ODFInternalFactory();
++			DiscoveryServiceManager dsm = factory.create(DiscoveryServiceManager.class);
++			DiscoveryServiceProperties serviceProps = null;
++			if (discoveryServiceId.startsWith(HEALTH_TEST_DISCOVERY_SERVICE_ID)) {
++				serviceProps = HealthCheckServiceRuntime.getHealthCheckServiceProperties();
++			} else {
++				serviceProps = dsm.getDiscoveryServiceProperties(discoveryServiceId);
++			}
++			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(discoveryServiceId);
++			if (runtime == null) {
++				throw new RuntimeException(MessageFormat.format("Service runtime for service ''{0}'' was not found.", discoveryServiceId));
++			}
++			DiscoveryService runtimeProxy = runtime.createDiscoveryServiceProxy(serviceProps);
++			SyncDiscoveryService proxy = null;
++			if (runtimeProxy instanceof AsyncDiscoveryService) {
++				proxy = new AsyncDiscoveryServiceWrapper( (AsyncDiscoveryService) runtimeProxy);
++			} else {
++				proxy = (SyncDiscoveryService) runtimeProxy;
++			}
++			proxy.setMetadataStore(factory.create(MetadataStore.class));
++			AnnotationStore as = factory.create(AnnotationStore.class);
++			if (request != null) {
++				as.setAnalysisRun(request.getId());
++			}
++			proxy.setAnnotationStore(as);
++			return proxy;
++		} catch (ServiceNotFoundException exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++
++	/**
++	 * Package-private helper method to be called when the current discovery service has finished
++	 * and the tracker should advance to the next one.
++	 * NOTE: This should only be called once across all nodes, i.e., typically from a Kafka consumer
++	 *       that runs on all nodes with the same consumer group ID.
++	 *
++	 * @param tracker tracker of the analysis request to advance
++	 */
++	void advanceToNextDiscoveryService(final AnalysisRequestTracker tracker) {
++		DiscoveryServiceRequest req = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++		DiscoveryServiceResponse resp = TrackerUtil.getCurrentDiscoveryServiceStartResponse(tracker);
++		String dsRunID = "N/A";
++		if (resp instanceof DiscoveryServiceAsyncStartResponse) {
++			dsRunID = ((DiscoveryServiceAsyncStartResponse) resp).getRunId();
++		}
++		String dsID = req.getDiscoveryServiceId();
++
++		TrackerUtil.moveToNextDiscoveryService(tracker);
++		DiscoveryServiceRequest nextDSReq = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++		if (nextDSReq == null) {
++			logger.log(Level.FINER, "DSWatcher: Run ''{0}'' of DS ''{1}'' was last of request ''{2}'', marking overall request as finished",
++					new Object[] { dsRunID, dsID, tracker.getRequest().getId() });
++			// overall request is finished
++			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.FINISHED);
++			tracker.setStatusDetails("All discovery services ran successfully");
++			
++			// now propagate annotations if configured
++			logger.log(Level.FINE, "Request is finished, checking for annotation propagation");
++			Boolean doPropagation = odf.getSettingsManager().getODFSettings().getEnableAnnotationPropagation();
++			if (Boolean.TRUE.equals(doPropagation)) {
++				TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++				try {
++					transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++						
++						@Override
++						public Object call() throws Exception {
++							AnnotationPropagator ap = odf.getMetadataStore().getAnnotationPropagator();
++							if (ap != null) {
++								logger.log(Level.FINE, "Annotation Propagator exists, running propagation");
++								try {
++									ap.propagateAnnotations(new ODFFactory().create().getAnnotationStore(), tracker.getRequest().getId());
++								} catch(Exception exc) {
++									logger.log(Level.SEVERE, "An unexpected exception occurred while propagating annotations", exc);
++									tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.ERROR);
++									String msg = MessageFormat.format("An unexpected exception occurred while propagating annotations: ''{0}''", Utils.getExceptionAsString(exc));
++									tracker.setStatusDetails(msg);
++								}
++							}
++							return null;
++						}
++					});
++				} catch (Exception e) {
++					// should never happen as exception is handled inside the callable
++					throw new RuntimeException(e);
++				}
++			}
++		} else {
++			logger.log(Level.FINER, "DSWatcher: Run ''{0}'' of DS ''{1}'' was not the last of request ''{2}'', advancing to the next discovery service request",
++					new Object[] { dsRunID, dsID, tracker.getRequest().getId() });
++			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.IN_DISCOVERY_SERVICE_QUEUE);
++			queueManager.enqueue(tracker);
++		}
++		Utils.setCurrentTimeAsLastModified(tracker);
++		store.store(tracker);
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java
+new file mode 100755
+index 0000000..9b16270
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java
+@@ -0,0 +1,279 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.Comparator;
++import java.util.LinkedHashSet;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import java.text.MessageFormat;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++
++/**
++*
++* Maps a list of {@link AnnotationType} objects to a list of service ids representing concrete discovery
++* services that generate the requested annotation types.
++* 
++* Internally, this class generates a list of all possible combinations of discovery services which may be
++* used to generate the requested annotation types. The combinations are then assessed and ordered by the
++* expected execution effort and the one with the least execution effort is provided. 
++*
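++* A minimal usage sketch (setAnnotationTypes() is assumed to be the setter matching the
++* getAnnotationTypes() accessor used below):
++* <pre>{@code
++* AnalysisRequest request = new AnalysisRequest();
++* request.setAnnotationTypes(Arrays.asList("MyAnnotationType")); // hypothetical annotation type
++* DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++* List<String> serviceIds = mapper.getRecommendedDiscoveryServiceSequence(); // null if no services match
++* }</pre>
++*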
++*/
++public class DeclarativeRequestMapper {
++
++	private Logger logger = Logger.getLogger(DeclarativeRequestMapper.class.getName());
++
++	DiscoveryServiceManager dsManager = new ODFFactory().create().getDiscoveryServiceManager();
++	List<DiscoveryServiceProperties> dsPropList = dsManager.getDiscoveryServicesProperties();
++
++	private List<DiscoveryServiceSequence> discoveryServiceSequences = new ArrayList<DiscoveryServiceSequence>();
++
++	public DeclarativeRequestMapper(AnalysisRequest request) {
++		String messageText = "Generating possible discovery service sequences for annotation types {0}.";
++		logger.log(Level.INFO, MessageFormat.format(messageText, request.getAnnotationTypes()));
++
++		this.discoveryServiceSequences = calculateDiscoveryServiceSequences(request.getAnnotationTypes());
++		Collections.sort(this.discoveryServiceSequences, new EffortComparator());
++	}
++
++	/**
++	*
++	* Represents a single discovery service sequence.
++	*
++	*/
++	public class DiscoveryServiceSequence {
++		private LinkedHashSet<String> serviceSequence;
++
++		public DiscoveryServiceSequence() {
++			this.serviceSequence = new LinkedHashSet<String>();
++		}
++
++		public DiscoveryServiceSequence(LinkedHashSet<String> serviceIds) {
++			this.serviceSequence = serviceIds;
++		}
++
++		public LinkedHashSet<String> getServiceSequence() {
++			return this.serviceSequence;
++		}
++
++		public List<String> getServiceSequenceAsList() {
++			return new ArrayList<String>(this.serviceSequence);
++		}
++
++		@Override
++		public boolean equals(Object obj) {
++			if ((obj == null) || !(obj instanceof DiscoveryServiceSequence)) {
++				return false;
++			}
++			return this.getServiceSequence().equals(((DiscoveryServiceSequence) obj).getServiceSequence());
++		}
++
++		// Overriding hashCode method to ensure proper results of equals() method
++		// (See http://www.javaranch.com/journal/2002/10/equalhash.html)
++		@Override
++		public int hashCode() {
++			return Utils.joinStrings(new ArrayList<String>(this.serviceSequence), ',').hashCode();
++		}
++	}
++
++	/**
++	*
++	* Internal class that estimates the effort for executing a sequence of discovery services.
++	* Should be extended to take runtime statistics into account. 
++	*
++	*/
++	private class EffortComparator implements Comparator<DiscoveryServiceSequence> {
++		public int compare(DiscoveryServiceSequence da1, DiscoveryServiceSequence da2) {
++			if (da1.getServiceSequence().size() < da2.getServiceSequence().size()) {
++				return -1;
++			} else if (da1.getServiceSequence().size() > da2.getServiceSequence().size()) {
++				return 1;
++			} else {
++				return 0;
++			}
++		}
++	}
++
++	/**
++	 * Returns the calculated list of discovery service sequences ordered by the execution effort,
++	 * starting with the sequence that is supposed to cause the minimum execution effort.
++	 *
++	 * @return List of discovery service sequences
++	 */
++	public List<DiscoveryServiceSequence> getDiscoveryServiceSequences() {
++		return this.discoveryServiceSequences;
++	}
++
++	/**
++	 * Returns the recommended discovery service sequence, i.e. the one that is supposed to cause the
++	 * minimum execution effort.
++	 *
++	 * @return Discovery service sequence
++	 */
++	public List<String> getRecommendedDiscoveryServiceSequence() {
++		if (!getDiscoveryServiceSequences().isEmpty()) {
++			return new ArrayList<String>(this.discoveryServiceSequences.get(0).getServiceSequence());
++		} else {
++			return null;
++		}
++	}
++
++	/**
++	 * Remove all discovery service sequences that contain a specific service id. Use this method
++	 * to update the list of discovery service sequences after a specific discovery service has
++	 * failed and should not be used any more.
++	 *
++	 * @param serviceId Id of discovery service to be removed
++	 * @return true if at least one sequence was removed, false otherwise
++	 */
++	public boolean removeDiscoveryServiceSequences(String serviceId) {
++		boolean serviceRemoved = false;
++		List<DiscoveryServiceSequence> updatedList = new ArrayList<DiscoveryServiceSequence>();
++		updatedList.addAll(this.discoveryServiceSequences);
++		for (DiscoveryServiceSequence sequence : this.discoveryServiceSequences) {
++			if (sequence.getServiceSequence().contains(serviceId)) {
++				updatedList.remove(sequence);
++				serviceRemoved = true;
++			}
++		}
++		this.discoveryServiceSequences = updatedList;
++		return serviceRemoved;
++	}
++
++	/**
++	 * Internal method that determines all possible sequences of discovery services which could be used
++	 * to generate the requested annotation type. Using recursion, all levels of prerequisites are taken
++	 * into account.
++	 *
++	 * @param annotationType Annotation type to be generated
++	 * @return List of discovery service sequences that generate the requested annotation type
++	 */
++	private List<DiscoveryServiceSequence> getDiscoveryServiceSequencesForAnnotationType(String annotationType) {
++		List<DiscoveryServiceSequence> result = new ArrayList<DiscoveryServiceSequence>();
++		for (DiscoveryServiceProperties dsProps : this.dsPropList) {
++			if ((dsProps.getResultingAnnotationTypes() != null) && dsProps.getResultingAnnotationTypes().contains(annotationType)) {
++				DiscoveryServiceSequence da = new DiscoveryServiceSequence();
++				da.getServiceSequence().add(dsProps.getId());
++				List<DiscoveryServiceSequence> discoveryApproachesForService = new ArrayList<DiscoveryServiceSequence>();
++				discoveryApproachesForService.add(da);
++
++				// If there are prerequisite annotation types, also merge their services into the result
++				if ((dsProps.getPrerequisiteAnnotationTypes() != null)
++						&& !dsProps.getPrerequisiteAnnotationTypes().isEmpty()) {
++					discoveryApproachesForService = combineDiscoveryServiceSequences(
++							calculateDiscoveryServiceSequences(dsProps.getPrerequisiteAnnotationTypes()),
++							discoveryApproachesForService);
++				}
++				logger.log(Level.INFO, "Discovery approaches for annotationType " + annotationType + ":");
++				for (DeclarativeRequestMapper.DiscoveryServiceSequence discoveryApproach : discoveryApproachesForService) {
++					logger.log(Level.INFO,
++							Utils.joinStrings(new ArrayList<String>(discoveryApproach.getServiceSequence()), ','));
++				}
++
++				result.addAll(discoveryApproachesForService);
++			}
++		}
++		return result;
++	}
++
++	/**
++	 * Internal method that combines two lists of discovery service sequences by generating all possible
++	 * combinations of the entries of both lists. The method avoids duplicate services in each sequence
++	 * and duplicate sequences in the resulting list.
++	 *
++	 * @param originalSequences Original list of discovery service sequences
++	 * @param additionalSequences Second list of discovery service sequences
++	 * @return Combined list of discovery service sequences
++	 */
++	private List<DiscoveryServiceSequence> combineDiscoveryServiceSequences(List<DiscoveryServiceSequence> originalSequences, List<DiscoveryServiceSequence> additionalSequences) {
++		// Example scenario for combining service sequences:
++		//
++		// Let's assume a service S that generates two annotation types AT1 and AT2 and S has prerequisite
++		// annotation type AT_P. There are two services P1 and P2 creating annotation type AT_P.
++		// The possible service sequences for generating annotation type AT1 are "P1, S" and "P2, S", same for AT2.
++		//
++		// When requesting a set of annotation types AT1 and AT2, this will result in the following four combinations
++		// which contain several redundancies:
++		// "P1, S, P1, S", "P1, S, P2, S", "P2, S, P1, S", "P2, S, P2, S"
++		// 
++		// This method uses three ways of removing redundancies:
++		//
++		// 1. Given that class DiscoveryServiceSequence internally uses LinkedHashSet, duplicate services are removed from the
++		// service sequences, resulting in: "P1, S", "P1, S, P2", "P2, S, P1", "P2, S"
++		//
++		// 2. Service sequences are only merged if the last service of the additional sequence is not already part of the original
++		// one which results in: "P1, S", "P1, S", "P2, S", "P2, S"
++		// 
++		// 3. Duplicate sequences are ignored, resulting in: "P1, S", "P2, S" which is the final result.  
++
++		List<DiscoveryServiceSequence> discoveryApproaches = new ArrayList<DiscoveryServiceSequence>();
++		for (DiscoveryServiceSequence da1 : originalSequences) {
++			for (DiscoveryServiceSequence da2 : additionalSequences) {
++				DiscoveryServiceSequence da = new DiscoveryServiceSequence();
++				da.getServiceSequence().addAll(da1.getServiceSequence());
++
++				// Add the second list only if its last serviceId is not already part of the first list
++				// (Otherwise unnecessary prerequisite services might be added, because the 2nd list may use different ones)
++				if (!da1.getServiceSequence().contains(da2.getServiceSequenceAsList().get(da2.getServiceSequenceAsList().size() - 1))) {
++					da.getServiceSequence().addAll(da2.getServiceSequence());
++				}
++
++				// Avoid duplicate entries (uses DiscoveryServiceSequence.equals() method)
++				if (!discoveryApproaches.contains(da)) {
++					discoveryApproaches.add(da);
++				}
++			}
++		}
++		return discoveryApproaches;
++	}
++
++	/**
++	 * Internal method that determines all possible sequences of discovery services which could be used
++	 * to generate a set of requested annotation types.
++	 *
++	 * Each discovery service creates one or multiple annotation types and may have prerequisite annotation types.
++	 * As there may be multiple services creating the same annotation type (maybe by using different prerequisite
++	 * annotation types), this may result in complex dependencies. Using recursion, this method iterates through
++	 * all the dependencies in order to calculate a list of all possible sequences of discovery services that could
++	 * be used to calculate the requested annotation types.
++	 * 
++	 * @param annotationTypes List of annotation types to be generated
++	 * @return List of discovery service sequences that generate the requested annotation types
++	 */
++	private List<DiscoveryServiceSequence> calculateDiscoveryServiceSequences(List<String> annotationTypes) {
++		List<DiscoveryServiceSequence> result = null;
++
++		for (String currentType : annotationTypes) {
++			// Calculate discovery sequences for current annotation type
++			List<DiscoveryServiceSequence> additionalDiscoveryApproaches = getDiscoveryServiceSequencesForAnnotationType(currentType);
++			if (result == null) {
++				result = additionalDiscoveryApproaches;
++			} else {
++				// Merge with discovery sequences determined for the previous annotation types in the list 
++				result = combineDiscoveryServiceSequences(result, additionalDiscoveryApproaches);
++			}
++		}
++		return result;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java
+new file mode 100755
+index 0000000..20b7661
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java
+@@ -0,0 +1,478 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.ArrayList;
++import java.util.Date;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.LinkedHashMap;
++import java.util.LinkedList;
++import java.util.List;
++import java.util.ListIterator;
++import java.util.Map;
++import java.util.Map.Entry;
++import java.util.Properties;
++import java.util.Set;
++import java.util.UUID;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++
++/**
++ * This class is an in-memory store for request trackers (showing the status of analysis requests) as well as
++ * for annotations. Both trackers and annotations are put on the ODF status queue, which
++ * (a) acts as a "semi"-persistent store ("semi" because Kafka's retention mechanism will eventually delete entries), and
++ * (b) provides a way to propagate those changes to other ODF nodes.
++ * The annotations and trackers themselves are stored in memory in static variables.
++ * 
++ * This is how it works:
++ * 1. A single consumer thread listens on the status topic
++ * 2. If an incoming status queue entry is a tracker, it stores it in the in-memory tracker store
++ *    If it is an annotation, it stores it in the in-memory annotation store
++ * 3. Queries for trackers and annotations only go against the in-memory stores
++ * 4. When a check for overaged entries occurs (a check that removes trackers from the store which are older than the queue retention time),
++ *    the annotations for overaged and finished requests are also deleted (see removeOveragedEntries()).
++ *
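++ * A sketch of the resulting store/query round trip (both methods are implemented below):
++ * <pre>{@code
++ * AnalysisRequestTrackerStore store = new DefaultStatusQueueStore();
++ * store.store(tracker); // blocks until the tracker has come back over the status queue
++ * AnalysisRequestTracker t = store.query(tracker.getRequest().getId());
++ * }</pre>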
++ */
++public class DefaultStatusQueueStore implements AnalysisRequestTrackerStore, AnnotationStore {
++
++	static Logger logger = Logger.getLogger(DefaultStatusQueueStore.class.getName());
++	
++	public static final long IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS = 5000;
++	
++	static Object globalRequestStoreMapLock = new Object();
++	
++	/*
++	 * http://docs.oracle.com/javase/7/docs/api/java/util/LinkedHashMap.html
++	 * 
++	 * A structural modification is any operation that adds or deletes one or more mappings or, in the case of access-ordered linked hash maps, affects iteration order. 
++	 * In insertion-ordered linked hash maps, merely changing the value associated with a key that is already contained in the map is not a structural modification. 
++	 * In access-ordered linked hash maps, merely querying the map with get is a structural modification.
++	 */
++	static LinkedHashMap<String, AnalysisRequestTracker> globalRequestStoreMap = new LinkedHashMap<String, AnalysisRequestTracker>();
++	
++	/*
++	 * This map is only used to track whether storing an object was successful.
++	 */
++	static ConcurrentHashMap<String, Boolean> globalStoreSuccessMap = new ConcurrentHashMap<String, Boolean>();
++		
++	private String analysisRun;
++	
++	// simplest implementation for now: just keep a simple list
++	private static List<Annotation> storedAnnotations = new LinkedList<>();
++	private static Object storedAnnotationsLock = new Object();
++
++	/**
++	 * This processor reads trackers from the queue and stores them in the globalRequestStoreMap.
++	 * The thread for this processor is created in the QueueManager implementation.
++	 *
++	 */
++	public static class StatusQueueProcessor implements QueueMessageProcessor {
++		Logger logger = Logger.getLogger(StatusQueueProcessor.class.getName());
++
++		@Override
++		public void process(ExecutorService executorService, String message, int partition, long offset) {
++			StatusQueueEntry sqe = new StatusQueueEntry();
++			try {
++				sqe = JSONUtils.fromJSON(message, StatusQueueEntry.class);
++			} catch (Exception e) {
++				logger.log(Level.WARNING, "Entry in status queue could not be processed", e);
++			}
++			
++			// first handle trackers and / or initial cleanup
++			synchronized (globalRequestStoreMapLock) {
++				if (sqe.getAnalysisRequestTracker() != null) {
++					try {
++						AnalysisRequestTracker tracker = sqe.getAnalysisRequestTracker();
++						String requestID = tracker.getRequest().getId();
++						logger.log(Level.FINEST, "Store status queue: found tracker with id ''{0}'', tracker: {1}", new Object[] { requestID, message });
++						if (tracker.getStatus() == STATUS.FINISHED) {
++							logger.log(Level.INFO, "Request with id ''{0}'' is finished, result: {1}", new Object[] { requestID, message });
++						}
++						//remove item so that it is added to the end of the list.
++						if (globalRequestStoreMap.containsKey(requestID)) {
++							globalRequestStoreMap.remove(requestID);
++						}
++
++						globalRequestStoreMap.put(requestID, tracker);
++						if (tracker.getRevisionId() != null) {
++							globalStoreSuccessMap.put(tracker.getRevisionId(), true);
++						}
++
++					} catch (Exception e) {
++						logger.log(Level.WARNING, "Tracker entry in status queue could not be processed", e);
++					}
++				} 				
++			}
++			
++			if (sqe.getAnnotation() != null) {
++				Annotation annot = sqe.getAnnotation();
++				logger.log(Level.FINEST, "Received annotation over status queue: ''{0}''", annot.getReference().getId());
++				synchronized (storedAnnotationsLock) {
++					storedAnnotations.add(annot);
++					globalStoreSuccessMap.put(annot.getReference().getId(), true);
++				}
++			}
++
++			removeOveragedEntries();
++		}
++
++	}
++
++	/////////////////////////////////////////////
++	// AnalysisRequestTrackerStore interface implementation
++
++	
++	/*
++	 * This store uses the lastModified timestamp to remove overaged trackers. 
++	 * Therefore, the lastModified timestamp MUST be set before storing anything to prevent unwanted removal.
++	 */
++	@Override
++	public void store(AnalysisRequestTracker tracker) {
++		String id = tracker.getRequest().getId();
++		logger.fine("Store " + id + " in trackerStore");
++
++		String revId = UUID.randomUUID() + "_" + System.currentTimeMillis();
++		tracker.setRevisionId(revId);
++		globalStoreSuccessMap.put(revId, false);
++		
++		ODFInternalFactory factory = new ODFInternalFactory();
++		DiscoveryServiceQueueManager qm = factory.create(DiscoveryServiceQueueManager.class);
++		// Put the tracker onto the status queue; the map used in query() is filled by the StatusQueueProcessor listening on the status queue.
++		StatusQueueEntry sqe = new StatusQueueEntry();
++		sqe.setAnalysisRequestTracker(tracker);
++		qm.enqueueInStatusQueue(sqe);
++		waitUntilEntryArrives(revId);
++	}
++
++	private void waitUntilEntryArrives(String entryId) {
++		boolean found = false;
++		int maxNumWaits = 1500;
++		int sleepMS = 20;
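++		// 1500 polls x 20 ms sleep = 30 seconds maximum wait, matching the failure message below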
++		while (maxNumWaits > 0) {
++			final Boolean storageSuccess = globalStoreSuccessMap.get(entryId);
++			if (Boolean.TRUE.equals(storageSuccess)) {
++				found = true;
++				globalStoreSuccessMap.remove(entryId);
++				break;
++			}
++			try {
++				Thread.sleep(sleepMS);
++			} catch (InterruptedException e) {
++				logger.log(Level.FINE, "Wait for tracker storage was interrupted", e);
++			}
++			maxNumWaits--;
++		}
++		if (!found) {
++			final String message = "The tracker could not be stored within 30 seconds!";
++			logger.warning(message);
++			throw new RuntimeException(message);
++		} else {
++			logger.fine("Tracker stored after " + ((1500 - maxNumWaits) * sleepMS) + " ms");
++		}
++	}
++
++	@Override
++	public AnalysisRequestTracker query(String analysisRequestId) {
++		logger.fine("Querying store for " + analysisRequestId);
++		synchronized (globalRequestStoreMapLock) {
++			AnalysisRequestTracker tracker = globalRequestStoreMap.get(analysisRequestId);
++			return tracker;
++		}
++	}
++	
++	@Override
++	public void clearCache() {
++		logger.fine("Clearing store cache");
++		synchronized (globalRequestStoreMapLock) {
++			globalRequestStoreMap.clear();
++		}
++	}
++	
++	private static void removeOveragedEntries() {
++		Set<String> finishedRequests = new HashSet<>();
++		logger.fine("Removing overaged entries from store");
++		synchronized (globalRequestStoreMapLock) {
++			Iterator<Entry<String, AnalysisRequestTracker>> entryIterator = globalRequestStoreMap.entrySet().iterator();
++			long maxRetentionMS = new ODFFactory().create().getSettingsManager().getODFSettings().getMessagingConfiguration().getAnalysisRequestRetentionMs();
++			long currentTimeMS = System.currentTimeMillis();
++			while (entryIterator.hasNext()) {
++				Entry<String, AnalysisRequestTracker> entry = entryIterator.next();
++				AnalysisRequestTracker tracker = entry.getValue();
++				if (currentTimeMS - tracker.getLastModified() >= maxRetentionMS) {
++					if (tracker.getStatus() == STATUS.FINISHED || tracker.getStatus() == STATUS.ERROR) {
++						finishedRequests.add(tracker.getRequest().getId());
++					}
++					entryIterator.remove();
++					logger.log(Level.INFO, "Removed overaged status tracker with id ''{0}''", new Object[] { entry.getKey() });
++				} else {
++					/*
++					 * Items in a LinkedHashMap are ordered in insertion order.
++					 * Because of this, if one item is not overaged, the following ones won't be either.
++					 */
++					break;
++				}
++			}
++		}
++		synchronized (storedAnnotationsLock) {
++			ListIterator<Annotation> it = storedAnnotations.listIterator();
++			while (it.hasNext()) {
++				Annotation annot = it.next();
++				if (finishedRequests.contains(annot.getAnalysisRun())) {
++					it.remove();
++				}
++			}
++		}
++	}
++
++	@Override
++	public int getSize() {
++		synchronized (globalRequestStoreMapLock) {
++			return globalRequestStoreMap.keySet().size();
++		}
++	}
++	
++	@Override
++	public AnalysisRequestTracker findSimilarQueuedRequest(AnalysisRequest request) {
++		synchronized (globalRequestStoreMapLock) {
++			for (AnalysisRequestTracker tracker : globalRequestStoreMap.values()) {
++				long startedAfterLimit = System.currentTimeMillis() - IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS;
++				if (TrackerUtil.isAnalysisWaiting(tracker) || 
++						(tracker.getNextDiscoveryServiceRequest() == 0 && tracker.getStatus() == STATUS.DISCOVERY_SERVICE_RUNNING && tracker.getLastModified() >= startedAfterLimit)) {
++					AnalysisRequest otherRequest = tracker.getRequest();
++					List<MetaDataObjectReference> dataSets = request.getDataSets();
++					List<MetaDataObjectReference> otherDataSets = otherRequest.getDataSets();
++					
++					if (otherDataSets.containsAll(dataSets) && tracker.getDiscoveryServiceRequests().get(0).getDiscoveryServiceId().equals(
++							request.getDiscoveryServiceSequence().get(0))) {
++						logger.log(Level.FINEST, "Found similar request for request {0}", new Object[] { request.getId()});
++						return tracker;
++					}
++				}
++			}
++			return null;
++		}
++	}
++
++	
++	@Override
++	public List<AnalysisRequestTracker> getRecentTrackers(int offset, int limit) {
++		if (offset < 0) {
++			throw new IllegalArgumentException("Offset parameter cannot be negative.");
++		}
++		if (limit < -1) {
++			throw new IllegalArgumentException("Limit parameter cannot be smaller than -1.");
++		}
++		synchronized (globalRequestStoreMapLock) {
++			List<AnalysisRequestTracker> arsList = new ArrayList<>();
++			Iterator<Map.Entry<String, AnalysisRequestTracker>> it = globalRequestStoreMap.entrySet().iterator();
++			// filter out health check requests
++			while (it.hasNext()) {
++				AnalysisRequestTracker t = it.next().getValue();
++				if (!t.getRequest().getDataSets().get(0).getId().startsWith(ControlCenter.HEALTH_TEST_DATA_SET_ID_PREFIX)) {
++					arsList.add(t);
++				}
++			}
++			// pick up to 'limit' requests from the end of the list, skipping the newest 'offset' entries (limit -1 means all)
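++			// e.g. for trackers [t1..t5] (oldest first), offset=0 and limit=2 yield [t5, t4]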
++			List<AnalysisRequestTracker> result = new ArrayList<>();
++			if (arsList.size() > offset) {
++				int startIndex = arsList.size() - offset - limit;
++				if (limit == -1 || startIndex < 0) {
++					startIndex = 0;
++				}
++				int endIndex = arsList.size() - offset - 1;
++				if (endIndex < 0) {
++					endIndex = 0;
++				}
++				for (int i=endIndex ; i>=startIndex; i--) {
++					result.add(arsList.get(i));
++				}
++			}
++			return result;
++		}
++	}
++	
++	@Override
++	public AnalysisRequestSummary getRequestSummary() {
++		synchronized (globalRequestStoreMapLock) {
++			try {
++				List<AnalysisRequestTracker> recentTrackers = this.getRecentTrackers(0, -1);
++				int totalSuccess = 0;
++				int totalFailure = 0;
++	
++				for (AnalysisRequestTracker tracker : recentTrackers) {
++					if (STATUS.FINISHED.equals(tracker.getStatus())) {
++						totalSuccess++;
++					} else if (STATUS.ERROR.equals(tracker.getStatus())) {
++						totalFailure++;
++					}
++				}
++				return new AnalysisRequestSummary(totalSuccess, totalFailure);
++			} catch (Exception exc) {
++				throw new RuntimeException(exc);
++			}
++		}	
++	}
++
++	/////////////////////////////////////////////
++	// AnnotationStore interface implementation
++	
++	@Override
++	public Properties getProperties() {
++		Properties props = new Properties();
++		props.put(STORE_PROPERTY_TYPE, "DefaultAnnotationStore");
++		props.put(STORE_PROPERTY_ID, getRepositoryId());
++		props.put(STORE_PROPERTY_DESCRIPTION, "A default in-memory implementation of the annotation store storing its results via Kafka");
++		return props;
++	}
++
++	@Override
++	public String getRepositoryId() {
++		return "ODFDefaultAnnotationStore";
++	}
++
++	@Override
++	public ConnectionStatus testConnection() {
++		return ConnectionStatus.OK;
++	}
++
++	@Override
++	public MetaDataObjectReference store(Annotation annotation) {
++		// clone object
++		try {
++			annotation = JSONUtils.cloneJSONObject(annotation);
++		} catch (JSONException e) {
++			logger.log(Level.SEVERE, "Annotation could not be stored because JSON conversion failed.", e);
++			throw new RuntimeException(e);
++		}
++		
++		// create a new reference
++		String annotId = "Annot" + UUID.randomUUID() + "_" + System.currentTimeMillis();
++		logger.log(Level.FINEST, "Storing annotation with ID ''{0}''", annotId);
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId(annotId);
++		ref.setRepositoryId(getRepositoryId());
++		annotation.setReference(ref);
++		if (analysisRun != null) {
++			annotation.setAnalysisRun(analysisRun);
++		}
++		
++		// re-use mechanism from status queue to wait until message has arrived via Kafka
++		globalStoreSuccessMap.put(annotId, false);
++		DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
++		StatusQueueEntry sqe = new StatusQueueEntry();
++		sqe.setAnnotation(annotation);
++		qm.enqueueInStatusQueue(sqe);
++		waitUntilEntryArrives(annotId);
++		return ref;
++	}
++
++	@Override
++	public List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId) {
++		List<Annotation> results = new ArrayList<>();
++		synchronized (storedAnnotationsLock) {
++			logger.log(Level.FINEST, "Number of annotations stored: ''{0}''", storedAnnotations.size());
++			ListIterator<Annotation> it = storedAnnotations.listIterator();
++			while (it.hasNext()) {
++				Annotation annot = it.next();
++				boolean match = true;
++				if (object != null) {
++					match = match && object.equals(AnnotationStoreUtils.getAnnotatedObject(annot));
++				}
++				if (annot.getAnalysisRun() != null) {
++					// analysisRun is not set for health check and for some of the tests
++					if (analysisRequestId != null) {
++						match &= annot.getAnalysisRun().equals(analysisRequestId);
++					}
++				}
++				if (match) {
++					results.add(annot);
++				}
++			}
++		}
++		logger.log(Level.FINEST, "Number of annotations found for request Id ''{0}'': ''{1}''", new Object[]{analysisRequestId, results.size()});
++		return results;
++	}
++
++	@Override
++	public void setAnalysisRun(String analysisRun) {
++		this.analysisRun = analysisRun;
++	}
++
++	@Override
++	public String getAnalysisRun() {
++		return this.analysisRun;
++	}
++
++	@Override
++	public Annotation retrieveAnnotation(MetaDataObjectReference ref) {
++		synchronized (storedAnnotationsLock) {
++			logger.log(Level.FINEST, "Number of annotations stored: ''{0}''", storedAnnotations.size());
++			ListIterator<Annotation> it = storedAnnotations.listIterator();
++			while (it.hasNext()) {
++				Annotation annot = it.next();
++				if (annot.getReference().equals(ref)) {
++					return annot;
++				}
++			}
++		}
++		return null;
++	}
++
++	@Override
++	public void setStatusOfOldRequest(long cutOffTimestamp, STATUS status, String detailsMessage) {
++		synchronized (globalRequestStoreMapLock) {
++			DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
++			for (AnalysisRequestTracker tracker : globalRequestStoreMap.values()) {
++				if (tracker.getLastModified() < cutOffTimestamp //
++						&& (STATUS.DISCOVERY_SERVICE_RUNNING.equals(tracker.getStatus()) //
++								|| STATUS.IN_DISCOVERY_SERVICE_QUEUE.equals(tracker.getStatus()) //
++								|| STATUS.INITIALIZED.equals(tracker.getStatus()) //
++						)) {
++					// set the tracker in-memory to have the result available immediately
++					tracker.setStatus(status);
++					if (detailsMessage == null) {
++						detailsMessage = "Setting request to " + status + " because it was last modified before " + new Date(cutOffTimestamp);
++					}
++					tracker.setStatusDetails(detailsMessage);
++					// put tracker onto queue
++					StatusQueueEntry sqe = new StatusQueueEntry();
++					sqe.setAnalysisRequestTracker(tracker);
++					qm.enqueueInStatusQueue(sqe);
++				}
++			}
++		}
++		
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java
+new file mode 100755
+index 0000000..0ea909f
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java
+@@ -0,0 +1,276 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.lang.Thread.State;
++import java.lang.Thread.UncaughtExceptionHandler;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++import java.util.Map.Entry;
++import java.util.Set;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++
++public class DefaultThreadManager implements ThreadManager {
++
++	private Logger logger = Logger.getLogger(DefaultThreadManager.class.getName());
++
++	static Object unmanagedThreadLock = new Object();
++	static Map<String, Thread> unmanagedThreadMap = new HashMap<String, Thread>();
++	static Map<String, ODFRunnable> unmanagedThreadRunnableMap = new HashMap<String, ODFRunnable>();
++	
++	ExecutorService executorService;
++
++	public DefaultThreadManager() {
++	}
++	
++	private boolean isThreadRunning(Thread thread) {
++		return thread.getState() != State.TERMINATED;
++	}
++	
++	private void purgeTerminatedThreads() {
++		List<String> entriesToBeRemoved = new ArrayList<String>();
++		List<String> entriesToBeKept = new ArrayList<String>();
++		for (Map.Entry<String, Thread> entry : unmanagedThreadMap.entrySet()) {
++			if (!isThreadRunning(entry.getValue())) {
++				entriesToBeRemoved.add(entry.getKey());
++			} else {
++				entriesToBeKept.add(entry.getKey());
++			}
++		}
++		for (String id : entriesToBeRemoved) {
++			unmanagedThreadMap.remove(id);
++			unmanagedThreadRunnableMap.remove(id);
++		}
++		logger.finer("Removed finished threads: " + entriesToBeRemoved.toString());
++		logger.finer("Kept unfinished threads: " + entriesToBeKept.toString());
++	}
++	
++	@Override
++	public ThreadStartupResult startUnmanagedThread(final String id, final ODFRunnable runnable) {
++		ThreadStartupResult result = new ThreadStartupResult(id) {
++			@Override
++			public boolean isReady() {
++				synchronized (unmanagedThreadLock) {
++					if (unmanagedThreadRunnableMap.containsKey(id)) {
++						return unmanagedThreadRunnableMap.get(id).isReady();
++					}
++				}
++				return false;
++			}
++		};
++		synchronized (unmanagedThreadLock) {
++			purgeTerminatedThreads();
++			Thread t = unmanagedThreadMap.get(id);
++			if (t != null) {
++				if (isThreadRunning(t)) {
++					return result;
++				}
++			} 
++			runnable.setExecutorService(executorService);
++
++			Thread newThread = new Thread(runnable);
++			result.setNewThreadCreated(true);
++			newThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
++
++				@Override
++				public void uncaughtException(Thread thread, Throwable throwable) {
++					logger.log(Level.WARNING, "Uncaught exception in thread " + id + " - Thread will shutdown!", throwable);
++					synchronized (unmanagedThreadLock) {
++						purgeTerminatedThreads();
++					}
++				}
++			});
++
++			newThread.setDaemon(true); // TODO is it a daemon?
++			newThread.start();
++			unmanagedThreadMap.put(id, newThread);
++			unmanagedThreadRunnableMap.put(id,  runnable);
++		}
++		return result;
++	}
++
++	@Override
++	public ThreadStatus.ThreadState getStateOfUnmanagedThread(String id) {
++		synchronized (unmanagedThreadLock) {
++			Thread t = unmanagedThreadMap.get(id);
++			if (t == null) {
++				return ThreadStatus.ThreadState.NON_EXISTENT;
++			}
++			Thread.State ts = t.getState();
++			switch (ts) {
++			case TERMINATED:
++				return ThreadStatus.ThreadState.FINISHED;
++			default:
++				return ThreadStatus.ThreadState.RUNNING;
++			}
++		}
++	}
++
++
++
++	@Override
++	public void setExecutorService(ExecutorService executorService) {
++		this.executorService = executorService;
++	}
++
++	@Override
++	public void shutdownAllUnmanagedThreads() {
++		synchronized (unmanagedThreadLock) {
++			logger.log(Level.INFO, "Shutting down all ODF threads...");
++			for (String id : unmanagedThreadMap.keySet()) {
++				shutdownThreadImpl(id, false);
++			}
++			unmanagedThreadMap.clear();
++			unmanagedThreadRunnableMap.clear();
++			logger.log(Level.INFO, "All ODF threads shutdown");
++			purgeTerminatedThreads();
++		}		
++	}
++	
++	public void shutdownThreads(List<String> names) {
++		synchronized (unmanagedThreadLock) {
++			for (String name : names) {
++				shutdownThreadImpl(name, true);
++			}
++		}		
++	}
++
++	private void shutdownThreadImpl(String id, boolean purge) {
++		Thread t = unmanagedThreadMap.get(id);
++		if (t == null) {
++			return;
++		}
++		ODFRunnable r = unmanagedThreadRunnableMap.get(id);
++		r.cancel();
++		try {
++			Thread.sleep(500);
++		} catch (InterruptedException e1) {
++			e1.printStackTrace();
++		}
++		int max = 60;
++		while (t.getState() != Thread.State.TERMINATED) {
++			if (max == 0) {
++				break;
++			}
++			max--;
++			try {
++				Thread.sleep(1000);
++			} catch (InterruptedException e) {
++				// ignore and keep polling for the thread to terminate
++			}
++		}
++		if (max == 0) {
++			logger.log(Level.WARNING, "Thread {0} did not stop on its own and must be interrupted.", id);
++			t.interrupt();
++		}
++		if (purge) {
++			purgeTerminatedThreads();
++		}
++	}
++
++	@Override
++	public int getNumberOfRunningThreads() {
++		synchronized (unmanagedThreadLock) {
++			int result = 0;
++			for (Thread t : unmanagedThreadMap.values()) {
++				if (isThreadRunning(t)) {
++					result++;
++				}
++			}
++			return result;
++		}
++	}
++
++	@Override
++	public List<ThreadStatus> getThreadManagerStatus() {
++		synchronized (unmanagedThreadLock) {
++			List<ThreadStatus> result = new ArrayList<ThreadStatus>();
++			for (Entry<String, Thread> entry : unmanagedThreadMap.entrySet()) {
++				ThreadStatus status = new ThreadStatus();
++				status.setId(entry.getKey());
++				status.setState(getStateOfUnmanagedThread(entry.getKey()));
++				ODFRunnable odfRunnable = unmanagedThreadRunnableMap.get(entry.getKey());
++				if (odfRunnable != null) {
++					status.setType(odfRunnable.getClass().getName());
++				}
++				result.add(status);
++			}
++
++			return result;
++		}
++	}
++
++	@Override
++	public void waitForThreadsToBeReady(long waitingLimitMs, List<ThreadStartupResult> startedThreads) throws TimeoutException {
++		Set<String> threadsToWaitFor = new HashSet<String>();
++		for (ThreadStartupResult res : startedThreads) {
++			//Only if a new thread was created we wait for it to be ready.
++			if (res.isNewThreadCreated()) {
++				threadsToWaitFor.add(res.getThreadId());
++			}
++		}
++		if (threadsToWaitFor.isEmpty()) {
++			return;
++		}
++
++		final int msToWait = 200;
++		final long maxPolls = waitingLimitMs / msToWait;
++		int count = 0;
++		while (!threadsToWaitFor.isEmpty() && count < maxPolls) {
++			List<String> ready = new ArrayList<String>();
++			List<String> notReady = new ArrayList<String>();
++			for (ThreadStartupResult thr : startedThreads) {
++				if (thr.isReady()) {
++					ready.add(thr.getThreadId());
++					threadsToWaitFor.remove(thr.getThreadId());
++				} else {
++					notReady.add(thr.getThreadId());
++				}
++			}
++
++			logger.fine("Ready: " + ready);
++			logger.fine("NotReady: " + notReady);
++
++			try {
++				Thread.sleep(msToWait);
++			} catch (InterruptedException e) {
++				e.printStackTrace();
++			}
++			count++;
++		}
++		if (count >= maxPolls) {
++			String msg = "Threads " + threadsToWaitFor + " are not ready after " + waitingLimitMs + " ms, giving up waiting for them";
++			logger.log(Level.WARNING, msg);
++			throw new TimeoutException(msg);
++		}
++		
++		logger.fine("All threads ready after " + (count * msToWait) + "ms");
++	}
++
++	@Override
++	public ODFRunnable getRunnable(String name) {
++		synchronized (unmanagedThreadLock) {
++			return unmanagedThreadRunnableMap.get(name);
++		}
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java
+new file mode 100755
+index 0000000..0f79e0c
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java
+@@ -0,0 +1,29 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.Callable;
++
++/**
++ * The default TransactionContextExecutor runs code in the same thread as the caller.
++ * 
++ */
++public class DefaultTransactionContextExecutor implements TransactionContextExecutor {
++	
++	@Override
++	public Object runInTransactionContext(Callable<Object> callable) throws Exception {
++		return callable.call();
++	}
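++
++	/* Usage sketch (illustrative only): the callable runs inline in the calling
++	 * thread, so the result is available synchronously:
++	 *
++	 *   TransactionContextExecutor executor = new DefaultTransactionContextExecutor();
++	 *   Object result = executor.runInTransactionContext(() -> "done"); // runs in the caller's thread
++	 */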
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java
+new file mode 100755
+index 0000000..dbfb597
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java
+@@ -0,0 +1,303 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.text.MessageFormat;
++import java.util.HashMap;
++import java.util.Map;
++import java.util.UUID;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.annotation.InternalAnnotationStoreUtils;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ * This class processes the entries of a discovery service queue and runs its respective discovery services in a separate thread. 
++ * 
++ */
++public class DiscoveryServiceStarter implements QueueMessageProcessor {
++
++	private Logger logger = Logger.getLogger(DiscoveryServiceStarter.class.getName());
++
++	AnalysisRequestTrackerStore trackerStore = null;
++	ControlCenter controlCenter = null;
++	Environment environment = null;
++	
++	/**
++	 * Creates a starter and initializes the tracker store, control center, and environment from the internal factory.
++	 */
++	public DiscoveryServiceStarter() {
++		ODFInternalFactory factory = new ODFInternalFactory();
++		trackerStore = factory.create(AnalysisRequestTrackerStore.class);
++		controlCenter = factory.create(ControlCenter.class);
++		environment = factory.create(Environment.class);
++	}
++	
++	private DiscoveryServiceRequest cloneDSRequestAndAddServiceProps(DiscoveryServiceRequest request, boolean requiresMetaDataCache) throws JSONException {
++		DiscoveryServiceRequest clonedRequest = JSONUtils.cloneJSONObject(request);
++		Map<String, Object> additionalProps = clonedRequest.getAdditionalProperties();
++		if (additionalProps == null) {
++			additionalProps = new HashMap<>();
++			clonedRequest.setAdditionalProperties(additionalProps);
++		}
++		// add service specific properties
++		String id = request.getDiscoveryServiceId();
++		Map<String, String> serviceProps = environment.getPropertiesWithPrefix(id);
++		additionalProps.putAll(serviceProps);
++		
++		// add cached metadata objects to request if required
++		if (requiresMetaDataCache) {
++			MetaDataObject mdo = request.getDataSetContainer().getDataSet();
++			MetadataStore mds = new ODFInternalFactory().create(MetadataStore.class);
++			clonedRequest.getDataSetContainer().setMetaDataCache(CachedMetadataStore.retrieveMetaDataCache(mds, mdo));
++		}
++
++		return clonedRequest;
++	}
++
++	
++	/**
++	 * Starts the service taken from the service runtime topic.
++	 */
++	public void process(ExecutorService executorService, String message, int partition, long offset) {
++		AnalysisRequestTracker tracker = null;
++		try {
++			tracker = JSONUtils.fromJSON(message, AnalysisRequestTracker.class);
++			logger.log(Level.FINEST, "DSStarter: received tracker {0}", JSONUtils.lazyJSONSerializer(tracker));
++			// load tracker from store and check if it was cancelled in the meantime
++			AnalysisRequestTracker storedRequest = trackerStore.query(tracker.getRequest().getId());
++
++			if (storedRequest == null || storedRequest.getStatus() != STATUS.CANCELLED) {
++				// set tracker to running
++				tracker.setStatus(STATUS.DISCOVERY_SERVICE_RUNNING);
++				trackerStore.store(tracker);
++				
++				DiscoveryServiceRequest nextRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++				if (nextRequest == null) {
++					logger.log(Level.WARNING, "Request in queue has wrong format");
++					tracker.setStatus(STATUS.ERROR);
++				} else {
++					nextRequest.setTakenFromRequestQueue(System.currentTimeMillis());
++					trackerStore.store(tracker);
++					String dsID = nextRequest.getDiscoveryServiceId();
++					SyncDiscoveryService nextService = ControlCenter.getDiscoveryServiceProxy(dsID, tracker.getRequest());
++					if (nextService == null) {
++						logger.log(Level.WARNING, "Discovery Service ''{0}'' could not be created", dsID);
++						throw new DiscoveryServiceUnreachableException("Java proxy for service with id " + dsID + " could not be created");
++					} else {
++						DataSetContainer ds = nextRequest.getDataSetContainer();
++						DataSetCheckResult checkResult = nextService.checkDataSet(ds);
++						if (checkResult.getDataAccess() == DataSetCheckResult.DataAccess.NotPossible) {
++							String responseDetails = "";
++							if (checkResult.getDetails() != null) {
++								responseDetails = " Reason: " + checkResult.getDetails();
++							}
++							if (tracker.getRequest().isIgnoreDataSetCheck()) {
++								String msg = MessageFormat.format("Discovery service ''{0}'' cannot process data set ''{1}''.{2} - Ignoring and advancing to next service",
++										new Object[]{dsID, ds.getDataSet().getReference(), responseDetails});
++								logger.log(Level.INFO, msg);
++								// check for next queue
++								DiscoveryServiceSyncResponse dummyResponse = new DiscoveryServiceSyncResponse();
++								dummyResponse.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++								dummyResponse.setDetails(msg);
++								TrackerUtil.addDiscoveryServiceStartResponse(tracker, dummyResponse);
++								controlCenter.advanceToNextDiscoveryService(tracker);
++							} else {
++								tracker.setStatus(STATUS.ERROR);
++								String msg = MessageFormat.format("Discovery service ''{0}'' cannot process data set ''{1}''.{2}",
++										new Object[]{dsID, ds.getDataSet().getReference(), responseDetails});
++								tracker.setStatusDetails(msg);
++								logger.log(Level.WARNING, msg);
++							}
++						} else {
++							nextService.setExecutorService(executorService);
++							runServiceInBackground(executorService, tracker, nextRequest, nextService);
++						}
++					}
++				}
++			}
++		} catch (DiscoveryServiceUnreachableException exc) {
++			logger.log(Level.WARNING, "Discovery service could not be started because it is unreachable", exc);
++			if (tracker != null) {
++				tracker.setStatus(STATUS.ERROR);
++				tracker.setStatusDetails(exc.getReason());
++			}
++		} catch (Throwable exc) {
++			logger.log(Level.WARNING, "An error occurred when starting the discovery service", exc);
++			if (tracker != null) {
++				tracker.setStatus(STATUS.ERROR);
++				tracker.setStatusDetails(Utils.getExceptionAsString(exc));
++			}
++		}
++		updateTracker(tracker);
++	}
++
++	
++	class ServiceRunner implements ODFRunnable {
++		AnalysisRequestTracker tracker;
++		DiscoveryServiceRequest nextRequest;
++		SyncDiscoveryService nextService;
++		
++		public ServiceRunner(AnalysisRequestTracker tracker, DiscoveryServiceRequest nextRequest, SyncDiscoveryService nextService) {
++			super();
++			this.tracker = tracker;
++			this.nextRequest = nextRequest;
++			this.nextService = nextService;
++		}
++
++		@Override
++		public void run() {
++			try {
++				runService(tracker, nextRequest, nextService);
++			} catch (Throwable exc) {
++				logger.log(Level.WARNING, "An error occurred when running the discovery service", exc);
++				if (tracker != null) {
++					tracker.setStatus(STATUS.ERROR);
++					tracker.setStatusDetails(Utils.getExceptionAsString(exc));
++				}
++			}
++			updateTracker(tracker);
++		}
++		
++		@Override
++		public void setExecutorService(ExecutorService service) {
++			
++		}
++		
++		@Override
++		public boolean isReady() {
++			return true;
++		}
++		
++		@Override
++		public void cancel() {
++		}
++
++	}
++	
++	
++	private void runServiceInBackground(ExecutorService executorService, final AnalysisRequestTracker tracker, final DiscoveryServiceRequest nextRequest, final SyncDiscoveryService nextService) throws JSONException {
++		String suffix = nextRequest.getDiscoveryServiceId() + "_" + nextRequest.getOdfRequestId() + UUID.randomUUID().toString();
++		String runnerId = "DSRunner_" + suffix;
++		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
++		ServiceRunner serviceRunner = new ServiceRunner(tracker, nextRequest, nextService);
++		tm.setExecutorService(executorService);
++		tm.startUnmanagedThread(runnerId, serviceRunner);
++	}
++	
++	private void runService(AnalysisRequestTracker tracker, DiscoveryServiceRequest nextRequest, SyncDiscoveryService nextService) throws JSONException {
++		DiscoveryServiceResponse response = null;
++		String dsID = nextRequest.getDiscoveryServiceId();
++		boolean requiresAuxObjects = controlCenter.requiresMetaDataCache(nextService);
++		if (nextService instanceof SyncDiscoveryService) {
++			SyncDiscoveryService nextServiceSync = (SyncDiscoveryService) nextService;
++			logger.log(Level.FINER, "Starting synchronous analysis on service {0}", dsID);
++			DiscoveryServiceSyncResponse syncResponse = nextServiceSync.runAnalysis(cloneDSRequestAndAddServiceProps(nextRequest, requiresAuxObjects));
++			nextRequest.setFinishedProcessing(System.currentTimeMillis());
++			//Even if the analysis was concurrently cancelled we store the results since the service implementation could do this by itself either way.
++			long before = System.currentTimeMillis();
++			InternalAnnotationStoreUtils.storeDiscoveryServiceResult(syncResponse.getResult(), tracker.getRequest());
++			nextRequest.setTimeSpentStoringResults(System.currentTimeMillis() - before);
++			// remove result to reduce size of response
++			syncResponse.setResult(null);
++			response = syncResponse;
++		} else {
++			throw new RuntimeException("Unknown Java proxy created for service with id " + dsID);
++		}
++
++		// process response
++		if (response.getCode() == null) {
++			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++			String origDetails = response.getDetails();
++			response.setDetails(MessageFormat.format("Discovery service did not return a response code. Assuming error. Original message: {0}", origDetails));
++		}
++		switch (response.getCode()) {
++		case UNKNOWN_ERROR:
++			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
++			tracker.setStatus(STATUS.ERROR);
++			tracker.setStatusDetails(response.getDetails());
++			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unknown error ''{0}'', ''{1}''", new Object[] { response.getCode().name(),
++					response.getDetails(), dsID });
++			break;
++		case NOT_AUTHORIZED:
++			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
++			tracker.setStatus(STATUS.ERROR);
++			tracker.setStatusDetails(response.getDetails());
++			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unauthorized ''{0}'', ''{1}''", new Object[] { response.getCode().name(),
++					response.getDetails(), dsID });
++			break;
++		case TEMPORARILY_UNAVAILABLE:
++			tracker.setStatus(STATUS.IN_DISCOVERY_SERVICE_QUEUE);
++			logger.log(Level.INFO, "Discovery Service ''{2}'' responded that it is unavailable right now ''{0}'', ''{1}''", new Object[] {
++					response.getCode().name(), response.getDetails(), dsID });
++			// requeue and finish immediately
++			controlCenter.getQueueManager().enqueue(tracker);
++			return;
++		case OK:
++			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
++			logger.log(Level.FINER, "Synchronous Discovery Service processed request ''{0}'', ''{1}''", new Object[] { response.getCode().name(), response.getDetails() });
++			AnalysisRequestTracker storedTracker = trackerStore.query(tracker.getRequest().getId());
++			//A user could've cancelled the analysis concurrently. In this case, ignore the response and don't overwrite the tracker
++			if (storedTracker != null && storedTracker.getStatus() != STATUS.CANCELLED) {
++				// check for next queue
++				controlCenter.advanceToNextDiscoveryService(tracker);
++			} else {
++				logger.log(Level.FINER, "Not advancing analysis request because it was cancelled!");
++			}
++			break;
++		default:
++			tracker.setStatus(STATUS.ERROR);
++			tracker.setStatusDetails(response.getDetails());
++			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unknown response ''{0}'', ''{1}''", new Object[] {
++					response.getCode().name(), response.getDetails(), dsID });
++			break;
++		}
++	}
++
++	private boolean updateTracker(AnalysisRequestTracker tracker) {
++		boolean cancelled = false;
++		if (tracker != null) {
++			AnalysisRequestTracker storedTracker = trackerStore.query(tracker.getRequest().getId());
++			//A user could've cancelled the analysis concurrently. In this case, ignore the response and don't overwrite the tracker
++			if (storedTracker == null || (! STATUS.CANCELLED.equals(storedTracker.getStatus())) ) {
++				Utils.setCurrentTimeAsLastModified(tracker);
++				trackerStore.store(tracker);
++			} else {
++				cancelled = true;
++				logger.log(Level.FINER, "Not storing analysis tracker changes because it was cancelled!");
++			}
++		}
++		return cancelled;
++	}
++	
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java
+new file mode 100755
+index 0000000..38e0747
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java
+@@ -0,0 +1,30 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++public class DiscoveryServiceUnreachableException extends RuntimeException {
++
++	private static final long serialVersionUID = 3581149213306073675L;
++	
++	private String reason;
++
++	public DiscoveryServiceUnreachableException(String reason) {
++		super(reason);
++		this.reason = reason;
++	}
++
++	public String getReason() {
++		return reason;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java
+new file mode 100755
+index 0000000..4cba0f6
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.Executors;
++
++public class ExecutorServiceFactory {
++
++	static Object execServiceLock = new Object();
++	static ExecutorService executorService = null;
++	
++	public ExecutorService createExecutorService() {
++		synchronized (execServiceLock) {
++			if (executorService == null) {
++				executorService = Executors.newCachedThreadPool();
++			}
++			return executorService;
++		}
++	}
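++	// Design note: all callers share this single cached thread pool; repeated
++	// calls return the same instance instead of creating a new pool each time.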
++	
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java
+new file mode 100755
+index 0000000..848a673
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java
+@@ -0,0 +1,73 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.api.discoveryservice.*;
++
++public class HealthCheckServiceRuntime implements ServiceRuntime {
++	public static final String HEALTH_CHECK_RUNTIME_NAME = "HealthCheck";
++
++	@Override
++	public String getName() {
++		return HEALTH_CHECK_RUNTIME_NAME;
++	}
++
++	@Override
++	public long getWaitTimeUntilAvailable() {
++		return 0;
++	}
++
++	@Override
++	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
++		return new SyncDiscoveryServiceBase() {
++			
++			@Override
++			public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++				DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++				response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++				response.setDetails("Health check service finished successfully");
++				return response;
++			}
++		};
++	}
++	
++	public static DiscoveryServiceProperties getHealthCheckServiceProperties() {		
++		DiscoveryServiceProperties props = new DiscoveryServiceProperties();
++		props.setId(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
++		props.setDescription("Health check service");
++		
++		DiscoveryServiceEndpoint ep = new DiscoveryServiceEndpoint();
++		ep.setRuntimeName(HEALTH_CHECK_RUNTIME_NAME);
++		
++		props.setEndpoint(ep);
++		return props;
++	}
++
++	@Override
++	public String getDescription() {
++		return "Internal runtime dedicated to health checks";
++	}
++
++	@Override
++	public void validate(DiscoveryServiceProperties props) throws ValidationException {
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java
+new file mode 100755
+index 0000000..61a29b1
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java
+@@ -0,0 +1,87 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.settings.validation.ImplementationValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.core.Utils;
++
++public class JavaServiceRuntime implements ServiceRuntime {
++
++	Logger logger = Logger.getLogger(JavaServiceRuntime.class.getName());
++
++	public static final String NAME = "Java";
++	
++	@Override
++	public String getName() {
++		return NAME;
++	}
++
++	@Override
++	public long getWaitTimeUntilAvailable() {
++		// for now, always run
++		return 0;
++	}
++
++	@Override
++	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
++		DiscoveryService service = null;
++		String className = null;
++		try {
++			className = JSONUtils.convert(props.getEndpoint(), DiscoveryServiceJavaEndpoint.class).getClassName();
++			Class<?> clazz = Class.forName(className);
++			Object o = clazz.newInstance();
++			service = (DiscoveryService) o;
++		} catch (Exception e) {
++			logger.log(Level.FINE, "An error occurred while instantiating the Java implementation", e);
++			logger.log(Level.WARNING, "Java implementation ''{0}'' for discovery service ''{1}'' could not be instantiated (internal error: ''{2}'')",
++					new Object[] { className, props.getId(), e.getMessage() });
++			return null;
++		}
++		if (service instanceof SyncDiscoveryService) {
++			return new TransactionSyncDiscoveryServiceProxy((SyncDiscoveryService) service);
++		} else if (service instanceof AsyncDiscoveryService) {
++			return new TransactionAsyncDiscoveryServiceProxy((AsyncDiscoveryService) service);
++		}
++		return service;
++	}
++
++	@Override
++	public String getDescription() {
++		return "The default Java runtime";
++	}
++
++	@Override
++	public void validate(DiscoveryServiceProperties props) throws ValidationException {
++		DiscoveryServiceJavaEndpoint javaEP;
++		try {
++			javaEP = JSONUtils.convert(props.getEndpoint(), DiscoveryServiceJavaEndpoint.class);
++		} catch (JSONException e) {
++			throw new ValidationException("Endpoint definition for Java service is not correct: " + Utils.getExceptionAsString(e));
++		}
++		new ImplementationValidator().validate("Service.endpoint", javaEP.getClassName());
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java
+new file mode 100755
+index 0000000..f999ecf
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java
+@@ -0,0 +1,27 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++
++public interface ODFRunnable extends Runnable {
++
++	void setExecutorService(ExecutorService service);
++	
++	void cancel();
++	
++	// return true if the runnable is likely to be ready to receive data
++	boolean isReady();
++	
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java
+new file mode 100755
+index 0000000..e6642c5
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java
+@@ -0,0 +1,32 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.ExecutorService;
++
++
++public interface QueueMessageProcessor {
++
++	/**
++	 * Callback to process a message taken from the queue.
++	 * 
++	 * @param executorService The executor service available for spawning additional work
++	 * @param msg The message to be processed
++	 * @param partition The Kafka topic partition this message was read from
++	 * @param msgOffset The offset of this particular message on this Kafka partition
++	 */
++	void process(ExecutorService executorService, String msg, int partition, long msgOffset);
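++
++	/* A minimal sketch of an implementation (illustrative only, not part of the
++	 * patch): a processor that merely logs each queue entry might look like
++	 *
++	 *   public class LoggingProcessor implements QueueMessageProcessor {
++	 *       public void process(ExecutorService es, String msg, int partition, long offset) {
++	 *           Logger.getLogger("queue").info(partition + "/" + offset + ": " + msg);
++	 *       }
++	 *   }
++	 */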
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java
+new file mode 100755
+index 0000000..da06dd2
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java
+@@ -0,0 +1,42 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++
++public interface ServiceRuntime {
++	
++	String getName();
++	
++	/**
++	 * Check if the runtime is currently available for processing.
++	 * Returns <= 0 if the runtime is available immediately. A number > 0
++	 * indicates how many seconds to wait until retrying.
++	 * 
++	 * Note: If this method returns > 0 the Kafka consumer will be shut down and only be 
++	 * started again when it returns <= 0. Shutting down and restarting the consumer is
++	 * rather costly so this should only be done if the runtime won't be accepting requests
++	 * for a foreseeable period of time.
++	 */
++	long getWaitTimeUntilAvailable();
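++	// A sketch of a possible implementation (hypothetical names, not part of the
++	// patch): a runtime that is overloaded until a known timestamp could return
++	// the remaining wait time in seconds, e.g.
++	//   return overloadedUntil > now ? (overloadedUntil - now) / 1000 : 0;
++	// The built-in runtimes in this patch all simply return 0.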
++
++	DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props);
++
++	String getDescription();
++	
++	void validate(DiscoveryServiceProperties props) throws ValidationException;
++	
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java
+new file mode 100755
+index 0000000..a867580
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java
+@@ -0,0 +1,147 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.io.InputStreamReader;
++import java.io.LineNumberReader;
++import java.net.URL;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.Enumeration;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Set;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.engine.ServiceRuntimeInfo;
++import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++
++public class ServiceRuntimes {
++
++	static Logger logger = Logger.getLogger(ServiceRuntimes.class.getName());
++
++	static List<ServiceRuntime> getRuntimeExtensions() throws IOException {
++		ClassLoader cl = ServiceRuntimes.class.getClassLoader();
++		Enumeration<URL> services = cl.getResources("META-INF/odf/odf-runtimes.txt");
++		List<ServiceRuntime> result = new ArrayList<>();
++		while (services.hasMoreElements()) {
++			URL url = services.nextElement();
++			InputStream is = url.openStream();
++			InputStreamReader isr = new InputStreamReader(is, "UTF-8");
++			LineNumberReader lnr = new LineNumberReader(isr);
++			String line = null;
++			while ((line = lnr.readLine()) != null) {
++				line = line.trim();
++				logger.log(Level.INFO,  "Loading runtime extension ''{0}''", line);
++				try {
++					@SuppressWarnings("unchecked")
++					Class<ServiceRuntime> clazz = (Class<ServiceRuntime>) cl.loadClass(line);
++					ServiceRuntime sr = clazz.newInstance();
++					result.add(sr);
++				} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
++					logger.log(Level.WARNING, MessageFormat.format("Runtime extension of class ''{0}'' could not be instantiated", line), e);
++				} 
++			}
++		}
++		logger.log(Level.INFO, "Number of runtime extensions found on the classpath: {0}", result.size());
++		return result;
++	}
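++
++	/* The extension file is read line by line, one fully qualified class name per
++	 * line. A minimal META-INF/odf/odf-runtimes.txt could therefore look like this
++	 * (the class name is a hypothetical example):
++	 *
++	 *   com.example.odf.MyCustomServiceRuntime
++	 */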
++	
++	static {
++		List<ServiceRuntime> allRuntimes = new ArrayList<>(Arrays.asList( //
++				new HealthCheckServiceRuntime(), //
++				new JavaServiceRuntime(), //
++				new SparkServiceRuntime() //
++		));
++		try {
++			List<ServiceRuntime> runtimeExtensions = getRuntimeExtensions();
++			allRuntimes.addAll(runtimeExtensions);
++		} catch (IOException e) {
++			logger.log(Level.WARNING, "An exception occurred when loading runtime extensions, ignoring them", e);
++		}
++		runtimes = Collections.unmodifiableList(allRuntimes);
++	}
++
++	private static List<ServiceRuntime> runtimes;
++
++	public static List<ServiceRuntime> getActiveRuntimes() {
++		Environment env = new ODFInternalFactory().create(Environment.class);
++		List<String> activeRuntimeNames = env.getActiveRuntimeNames();
++		if (activeRuntimeNames == null) {
++			return getAllRuntimes();
++		}
++		// always add health check runtime
++		Set<String> activeRuntimeNamesSet = new HashSet<>(activeRuntimeNames);
++		activeRuntimeNamesSet.add(HealthCheckServiceRuntime.HEALTH_CHECK_RUNTIME_NAME);
++		List<ServiceRuntime> activeRuntimes = new ArrayList<>();
++		for (ServiceRuntime rt : runtimes) {
++			if (activeRuntimeNamesSet.contains(rt.getName())) {
++				activeRuntimes.add(rt);
++			}
++		}
++		return activeRuntimes;
++	}
++
++	public static List<ServiceRuntime> getAllRuntimes() {
++		return runtimes;
++	}
++
++	public static ServiceRuntime getRuntimeForDiscoveryService(DiscoveryServiceProperties discoveryServiceProps) {
++		DiscoveryServiceEndpoint ep = discoveryServiceProps.getEndpoint();
++		for (ServiceRuntime runtime : getAllRuntimes()) {
++			if (runtime.getName().equals(ep.getRuntimeName())) {
++				return runtime;
++			}
++		}
++		return null;
++	}
++
++	public static ServiceRuntime getRuntimeForDiscoveryService(String discoveryServiceId) {
++		// special check because the health check runtime is not part of the configuration
++		if (discoveryServiceId.startsWith(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID)) {
++			return new HealthCheckServiceRuntime();
++		}
++		DiscoveryServiceManager dsm = new ODFInternalFactory().create(DiscoveryServiceManager.class);
++		try {
++			DiscoveryServiceProperties props = dsm.getDiscoveryServiceProperties(discoveryServiceId);
++			return getRuntimeForDiscoveryService(props);
++		} catch (ServiceNotFoundException e) {
++			return null;
++		}
++	}
++
++	public static ServiceRuntimesInfo getRuntimesInfo(List<ServiceRuntime> runtimes) {
++		List<ServiceRuntimeInfo> rts = new ArrayList<>();
++		for (ServiceRuntime rt : runtimes) {
++			ServiceRuntimeInfo sri = new ServiceRuntimeInfo();
++			sri.setName(rt.getName());
++			sri.setDescription(rt.getDescription());
++			rts.add(sri);
++		}
++		ServiceRuntimesInfo result = new ServiceRuntimesInfo();
++		result.setRuntimes(rts);
++		return result;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java
+new file mode 100755
+index 0000000..6dc1fd0
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java
+@@ -0,0 +1,110 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.text.MessageFormat;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.core.Utils;
++
++/**
++ * Proxy for calling any type of Spark discovery services.
++ * 
++ *
++ */
++
++public class SparkDiscoveryServiceProxy implements SyncDiscoveryService {
++	Logger logger = Logger.getLogger(SparkDiscoveryServiceProxy.class.getName());
++
++	protected MetadataStore metadataStore;
++	protected AnnotationStore annotationStore;
++	protected ExecutorService executorService;
++	private DiscoveryServiceProperties dsri;
++
++	public SparkDiscoveryServiceProxy(DiscoveryServiceProperties dsri) {
++		this.dsri = dsri;
++	}
++
++	@Override
++	public void setExecutorService(ExecutorService executorService) {
++		this.executorService = executorService;
++	}
++
++	@Override
++	public void setMetadataStore(MetadataStore metadataStore) {
++		this.metadataStore = metadataStore;
++	}
++
++	@Override
++	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
++		DataSetCheckResult checkResult = new DataSetCheckResult();
++		checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
++		try {
++			SparkServiceExecutor executor = new ODFInternalFactory().create(SparkServiceExecutor.class);
++			checkResult = executor.checkDataSet(this.dsri, dataSetContainer);
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "Error running discovery service.", e);
++			checkResult.setDetails(Utils.getExceptionAsString(e));
++		}
++		return checkResult;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		logger.log(Level.INFO, MessageFormat.format("Starting Spark discovery service ''{0}'', id {1}.", new Object[]{ dsri.getName(), dsri.getId() }));
++		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++		DiscoveryServiceSparkEndpoint endpoint;
++		try {
++			endpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++		} catch (JSONException e1) {
++			throw new RuntimeException(e1);
++		}
++		if ((endpoint.getJar() == null) || (endpoint.getJar().isEmpty())) {
++			response.setDetails("No jar file was provided that implements the Spark application.");
++		} else try {
++			SparkServiceExecutor executor = new ODFInternalFactory().create(SparkServiceExecutor.class);
++			response = executor.runAnalysis(this.dsri, request);
++			logger.log(Level.FINER, "Spark discovery service response: " + response.toString());
++			logger.log(Level.INFO, "Spark discovery service finished.");
++			return response;
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "Error running Spark application: ", e);
++			response.setDetails(Utils.getExceptionAsString(e));
++		}
++		response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++		return response;
++	}
++
++	@Override
++	public void setAnnotationStore(AnnotationStore annotationStore) {
++		this.annotationStore = annotationStore;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java
+new file mode 100755
+index 0000000..91056b3
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java
+@@ -0,0 +1,58 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class SparkServiceRuntime implements ServiceRuntime {
++
++	public static final String SPARK_RUNTIME_NAME = "Spark";
++	
++	@Override
++	public String getName() {
++		return SPARK_RUNTIME_NAME;
++	}
++
++	@Override
++	public long getWaitTimeUntilAvailable() {
++		return 0;
++	}
++
++	@Override
++	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
++		return new SparkDiscoveryServiceProxy(props);
++	}
++
++	@Override
++	public String getDescription() {
++		return "The default Spark runtime";
++	}
++
++	@Override
++	public void validate(DiscoveryServiceProperties props) throws ValidationException {
++		try {
++			JSONUtils.convert(props.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++		} catch (JSONException e1) {
++			throw new ValidationException("Endpoint definition for Spark service is not correct: " + Utils.getExceptionAsString(e1));
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java
+new file mode 100755
+index 0000000..206a6d0
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java
+@@ -0,0 +1,52 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++
++/**
++ * JSON-serializable entry of the ODF status queue. An entry carries either an
++ * annotation or an analysis request tracker.
++ */
++public class StatusQueueEntry {
++
++	private Annotation annotation;
++	private AnalysisRequestTracker analysisRequestTracker;
++
++	public Annotation getAnnotation() {
++		return annotation;
++	}
++
++	public void setAnnotation(Annotation annotation) {
++		this.annotation = annotation;
++	}
++
++	public AnalysisRequestTracker getAnalysisRequestTracker() {
++		return analysisRequestTracker;
++	}
++
++	public void setAnalysisRequestTracker(AnalysisRequestTracker analysisRequestTracker) {
++		this.analysisRequestTracker = analysisRequestTracker;
++	}
++
++
++	/**
++	 * @param sqe Status queue entry
++	 * @return ID of the analysis request that the entry belongs to, or null if it cannot be determined
++	 */
++	public static String getRequestId(StatusQueueEntry sqe) {
++		if (sqe.getAnnotation() != null) {
++			return sqe.getAnnotation().getAnalysisRun();
++		} else if (sqe.getAnalysisRequestTracker() != null) {
++			return sqe.getAnalysisRequestTracker().getRequest().getId();
++		}
++		return null;
++	}
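++
++	// Illustrative usage sketch (assumption; "tracker" and "queueManager" are hypothetical
++	// local variables, the enqueue method is from DiscoveryServiceQueueManager):
++	//   StatusQueueEntry sqe = new StatusQueueEntry();
++	//   sqe.setAnalysisRequestTracker(tracker);
++	//   queueManager.enqueueInStatusQueue(sqe);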
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java
+new file mode 100755
+index 0000000..33dba10
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java
+@@ -0,0 +1,68 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.List;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeoutException;
++
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++
++public interface ThreadManager {
++
++	void waitForThreadsToBeReady(long waitingLimitMs, List<ThreadStartupResult> startedThreads) throws TimeoutException;
++
++	ThreadStartupResult startUnmanagedThread(String name, ODFRunnable runnable);
++	
++	ThreadStatus.ThreadState getStateOfUnmanagedThread(String name);
++	
++	ODFRunnable getRunnable(String name);
++	
++	void setExecutorService(ExecutorService executorService);
++	
++	void shutdownAllUnmanagedThreads();
++	
++	void shutdownThreads(List<String> names);
++	
++	int getNumberOfRunningThreads();
++
++	List<ThreadStatus> getThreadManagerStatus();
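++
++	// Illustrative usage sketch (assumption; "odfRunnable" is a hypothetical ODFRunnable instance):
++	//   ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
++	//   ThreadStartupResult result = tm.startUnmanagedThread("my-thread", odfRunnable);
++	//   tm.waitForThreadsToBeReady(30000, java.util.Collections.singletonList(result));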
++
++	public abstract class ThreadStartupResult {
++
++		private String threadId;
++		private boolean newThreadCreated;
++
++		public ThreadStartupResult(String id) {
++			this.threadId = id;
++		}
++
++		public String getThreadId() {
++			return threadId;
++		}
++
++		public boolean isNewThreadCreated() {
++			return newThreadCreated;
++		}
++
++		public void setNewThreadCreated(boolean newThreadCreated) {
++			this.newThreadCreated = newThreadCreated;
++		}
++
++		public abstract boolean isReady();
++
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java
+new file mode 100755
+index 0000000..f1c7704
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java
+@@ -0,0 +1,76 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++
++public class TrackerUtil {
++	
++	/**
++	 * @param tracker Analysis request tracker to check
++	 * @return true if the first analysis of the tracker has not yet been started
++	 */
++	public static boolean isAnalysisWaiting(AnalysisRequestTracker tracker) {
++		return tracker.getNextDiscoveryServiceRequest() == 0 && (tracker.getStatus() == STATUS.IN_DISCOVERY_SERVICE_QUEUE || tracker.getStatus() == STATUS.INITIALIZED);
++	}
++	
++	public static boolean isCancellable(AnalysisRequestTracker tracker)  {
++		return (tracker.getStatus() == STATUS.IN_DISCOVERY_SERVICE_QUEUE || tracker.getStatus() == STATUS.INITIALIZED || tracker.getStatus() == STATUS.DISCOVERY_SERVICE_RUNNING);
++	}
++
++	public static DiscoveryServiceRequest getCurrentDiscoveryServiceStartRequest(AnalysisRequestTracker tracker) {
++		int i = tracker.getNextDiscoveryServiceRequest();
++		List<DiscoveryServiceRequest> requests = tracker.getDiscoveryServiceRequests();
++		if (i >= 0 && i < requests.size()) {
++			return requests.get(i);
++		}
++		return null;
++	}
++
++	public static DiscoveryServiceResponse getCurrentDiscoveryServiceStartResponse(AnalysisRequestTracker tracker) {
++		int i = tracker.getNextDiscoveryServiceRequest();
++		List<DiscoveryServiceResponse> responses = tracker.getDiscoveryServiceResponses();
++		if (responses == null || responses.isEmpty()) {
++			return null;
++		}
++		if (i >= 0 && i < responses.size()) {
++			return responses.get(i);
++		}
++		return null;
++	}
++
++	public static void moveToNextDiscoveryService(AnalysisRequestTracker tracker) {
++		int i = tracker.getNextDiscoveryServiceRequest();
++		List<DiscoveryServiceRequest> requests = tracker.getDiscoveryServiceRequests();
++		if (i >= 0 && i < requests.size()) {
++			tracker.setNextDiscoveryServiceRequest(i+1);
++		}
++	}
++
++	public static void addDiscoveryServiceStartResponse(AnalysisRequestTracker tracker, DiscoveryServiceResponse response) {
++		List<DiscoveryServiceResponse> l = tracker.getDiscoveryServiceResponses();
++		if (l == null) {
++			l = new ArrayList<DiscoveryServiceResponse>();
++			tracker.setDiscoveryServiceResponses(l);
++		}
++		l.add(response);
++	}
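++
++	// Illustrative sequence sketch for a single discovery service step (assumption;
++	// "tracker" and "response" are hypothetical local variables):
++	//   DiscoveryServiceRequest req = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++	//   // ... run the service for req ...
++	//   TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
++	//   TrackerUtil.moveToNextDiscoveryService(tracker);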
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java
+new file mode 100755
+index 0000000..1a3de04
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java
+@@ -0,0 +1,97 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.Callable;
++import java.util.concurrent.ExecutorService;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++
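++/**
++ * Wraps an asynchronous discovery service so that every call to it runs through a
++ * {@link TransactionContextExecutor}, i.e. in the proper transaction context.
++ */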
++public class TransactionAsyncDiscoveryServiceProxy implements AsyncDiscoveryService {
++
++	private AsyncDiscoveryService wrappedService;
++
++	public TransactionAsyncDiscoveryServiceProxy(AsyncDiscoveryService wrappedService) {
++		this.wrappedService = wrappedService;
++	}
++
++	public DiscoveryServiceAsyncStartResponse startAnalysis(final DiscoveryServiceRequest request) {
++		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++		try {
++			return (DiscoveryServiceAsyncStartResponse) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++
++				@Override
++				public Object call() throws Exception {
++					return wrappedService.startAnalysis(request);
++				}
++			});
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++
++	}
++
++	public DiscoveryServiceAsyncRunStatus getStatus(final String runId) {
++		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++		try {
++			return (DiscoveryServiceAsyncRunStatus) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++
++				@Override
++				public Object call() throws Exception {
++					return wrappedService.getStatus(runId);
++				}
++			});
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++
++	}
++
++	public void setExecutorService(ExecutorService executorService) {
++		wrappedService.setExecutorService(executorService);
++	}
++
++	public void setMetadataStore(MetadataStore metadataStore) {
++		wrappedService.setMetadataStore(metadataStore);
++	}
++
++	public void setAnnotationStore(AnnotationStore annotationStore) {
++		wrappedService.setAnnotationStore(annotationStore);
++	}
++
++	public DataSetCheckResult checkDataSet(final DataSetContainer dataSetContainer) {
++		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++		try {
++			return (DataSetCheckResult) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++
++				@Override
++				public Object call() throws Exception {
++					return wrappedService.checkDataSet(dataSetContainer);
++				}
++			});
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java
+new file mode 100755
+index 0000000..6c17686
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.Callable;
++
++/**
++ * Use this interface in the core framework whenever code that runs on an unmanaged thread
++ * (typically in the Kafka consumers) needs to access the metadata repository. The implementation
++ * of this interface ensures that the code runs in the correct context (regarding transactions etc.).
++ *
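++ * <p>Minimal usage sketch (illustrative; the called helper is hypothetical):
++ * <pre>
++ * TransactionContextExecutor executor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++ * Object result = executor.runInTransactionContext(new Callable&lt;Object&gt;() {
++ *     public Object call() throws Exception {
++ *         return doSomethingWithTheMetadataRepository(); // hypothetical helper
++ *     }
++ * });
++ * </pre>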
++ */
++public interface TransactionContextExecutor {
++	
++	/**
++	 * Run a generic callable in a transaction context. This method is deliberately not generic
++	 * (no type parameter) because some of the underlying infrastructures might not be able to support it.
++	 */
++	Object runInTransactionContext(Callable<Object> callable) throws Exception;
++	
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java
+new file mode 100755
+index 0000000..ec96e96
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java
+@@ -0,0 +1,79 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.controlcenter;
++
++import java.util.concurrent.Callable;
++import java.util.concurrent.ExecutorService;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++
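++/**
++ * Wraps a synchronous discovery service so that every call to it runs through a
++ * {@link TransactionContextExecutor}, i.e. in the proper transaction context.
++ */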
++public class TransactionSyncDiscoveryServiceProxy implements SyncDiscoveryService {
++
++	private SyncDiscoveryService wrappedService;
++
++	public TransactionSyncDiscoveryServiceProxy(SyncDiscoveryService wrappedService) {
++		this.wrappedService = wrappedService;
++	}
++
++	public DiscoveryServiceSyncResponse runAnalysis(final DiscoveryServiceRequest request) {
++		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++		try {
++			return (DiscoveryServiceSyncResponse) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++
++				@Override
++				public Object call() throws Exception {
++					return wrappedService.runAnalysis(request);
++				}
++			});
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++
++	}
++
++	public void setExecutorService(ExecutorService executorService) {
++		wrappedService.setExecutorService(executorService);
++	}
++
++	public void setMetadataStore(MetadataStore metadataStore) {
++		wrappedService.setMetadataStore(metadataStore);
++	}
++
++	public void setAnnotationStore(AnnotationStore annotationStore) {
++		wrappedService.setAnnotationStore(annotationStore);
++	}
++
++	public DataSetCheckResult checkDataSet(final DataSetContainer dataSetContainer) {
++		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
++		try {
++			return (DataSetCheckResult) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
++
++				@Override
++				public Object call() throws Exception {
++					return wrappedService.checkDataSet(dataSetContainer);
++				}
++			});
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java
+new file mode 100755
+index 0000000..e7cbc44
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java
+@@ -0,0 +1,258 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.discoveryservice;
++
++import java.io.BufferedReader;
++import java.io.IOException;
++import java.io.InputStream;
++import java.io.InputStreamReader;
++import java.net.MalformedURLException;
++import java.net.URL;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.configuration.ConfigManager;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++
++/**
++ *
++ * External Java API for creating and managing discovery services
++ *
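++ * <p>Typical usage (illustrative sketch):
++ * <pre>
++ * DiscoveryServiceManager dsm = new ODFFactory().create().getDiscoveryServiceManager();
++ * List&lt;DiscoveryServiceProperties&gt; services = dsm.getDiscoveryServicesProperties();
++ * </pre>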
++ */
++public class DiscoveryServiceManagerImpl implements DiscoveryServiceManager {
++	private Logger logger = Logger.getLogger(DiscoveryServiceManagerImpl.class.getName());
++	public ConfigManager configManager;
++
++	public DiscoveryServiceManagerImpl() {
++		configManager = new ODFInternalFactory().create(ConfigManager.class);
++	}
++
++	/**
++	 * Retrieve list of discovery services registered in ODF
++	 * @return List of registered ODF discovery services
++	 */
++	public List<DiscoveryServiceProperties> getDiscoveryServicesProperties() {
++		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServicesProperties");
++		List<DiscoveryServiceProperties> dsProperties = configManager.getConfigContainer().getRegisteredServices();
++		return dsProperties;
++	};
++
++	/**
++	 * Register a new service in ODF
++	 * @param dsProperties Properties of the discovery service to register
++	 * @throws ValidationException Validation of a property failed
++	 */
++	public void createDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "createDiscoveryService");
++		ConfigContainer update = new ConfigContainer();
++		List<DiscoveryServiceProperties> registeredServices = configManager.getConfigContainer().getRegisteredServices();
++		registeredServices.add(dsProperties);
++		update.setRegisteredServices(registeredServices);
++		configManager.updateConfigContainer(update);
++	};
++
++	/**
++	 * Update configuration of an ODF discovery service
++	 * @param dsProperties Properties of the discovery service to update
++	 */
++	public void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ServiceNotFoundException, ValidationException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "replaceDiscoveryService");
++		String serviceId = dsProperties.getId();
++		deleteDiscoveryService(serviceId);
++		createDiscoveryService(dsProperties);
++	};
++
++	/**
++	 * Remove a registered service from ODF
++	 * @param serviceId Discovery service ID
++	 */
++	public void deleteDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "deleteDiscoveryService");
++		ConfigContainer cc = configManager.getConfigContainer();
++		Iterator<DiscoveryServiceProperties> iterator = cc.getRegisteredServices().iterator();
++		boolean serviceFound = false;
++		while (iterator.hasNext()) {
++			if (iterator.next().getId().equals(serviceId)) {
++				iterator.remove();
++				serviceFound = true;
++			}
++		}
++		if (!serviceFound) {
++			throw new ServiceNotFoundException(serviceId);
++		} else {
++			configManager.updateConfigContainer(cc);
++		}
++	};
++
++	/**
++	 * Retrieve current configuration of a discovery service registered in ODF
++	 * @param serviceId Discovery Service ID
++	 * @return Properties of the service with this ID
++	 * @throws ServiceNotFoundException A service with this ID is not registered
++	 */
++	public DiscoveryServiceProperties getDiscoveryServiceProperties(String serviceId) throws ServiceNotFoundException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceProperties");
++		DiscoveryServiceProperties serviceFound = null;
++		List<DiscoveryServiceProperties> registeredServices;
++		registeredServices = configManager.getConfigContainer().getRegisteredServices();
++		for (DiscoveryServiceProperties service : registeredServices) {
++			if (service.getId().equals(serviceId)) {
++				serviceFound = service;
++				break;
++			}
++		}
++		if (serviceFound == null) {
++			throw new ServiceNotFoundException(serviceId);
++		}
++		return serviceFound;
++	};
++
++	/**
++	 * Retrieve status overview of all discovery services registered in ODF
++	 * @return List of status count maps for all discovery services
++	 */
++	public List<ServiceStatusCount> getDiscoveryServiceStatusOverview() {
++		DiscoveryServiceStatistics stats = new DiscoveryServiceStatistics(new ODFInternalFactory().create(AnalysisRequestTrackerStore.class).getRecentTrackers(0,-1));
++		return stats.getStatusCountPerService();
++	}
++
++	/**
++	 * Retrieve status of a specific discovery service
++	 * @param serviceId Discovery Service ID
++	 * @return Status of the service with this ID
++	 * @throws ServiceNotFoundException A service with this ID is not registered
++	 */
++	public DiscoveryServiceStatus getDiscoveryServiceStatus(String serviceId) throws ServiceNotFoundException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceStatus");
++
++		DiscoveryServiceStatus dsStatus = null;
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		DiscoveryService ds = cc.getDiscoveryServiceProxy(serviceId, null);
++		if (ds == null) {
++			throw new ServiceNotFoundException(serviceId);
++		}
++		dsStatus = new DiscoveryServiceStatus();
++		dsStatus.setStatus(DiscoveryServiceStatus.Status.OK);
++		dsStatus.setMessage(MessageFormat.format("Discovery service ''{0}'' status is OK", serviceId));
++		ServiceStatusCount serviceStatus = null;
++		List<ServiceStatusCount> statusCounts = getDiscoveryServiceStatusOverview();
++		for (ServiceStatusCount cnt : statusCounts) {
++			if (cnt.getId().equals(serviceId)) {
++				serviceStatus = cnt;
++				break;
++			}
++		}
++		if (serviceStatus != null) {
++			dsStatus.setStatusCount(serviceStatus);
++		}
++		return dsStatus;
++	};
++
++	/**
++	 * Retrieve runtime statistics of a specific discovery service
++	 * @param serviceId Discovery Service ID
++	 * @return Runtime statistics of the service with this ID
++	 */
++	public DiscoveryServiceRuntimeStatistics getDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceRuntimeStatistics");
++		DiscoveryServiceRuntimeStatistics dsrs = new DiscoveryServiceRuntimeStatistics();
++		dsrs.setAverageProcessingTimePerItemInMillis(0);   // TODO: implement
++		return dsrs;
++	};
++
++	/**
++	 * Delete runtime statistics of a specific discovery service
++	 * @param serviceId Discovery Service ID
++	 */
++	public void deleteDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "deleteDiscoveryServiceRuntimeStatistics");
++		// TODO: implement
++	};
++
++	/**
++	 * Retrieve picture representing a discovery service
++	 * @param serviceId Discovery Service ID
++	 * @return Input stream for image
++	 */
++	public InputStream getDiscoveryServiceImage(String serviceId) throws ServiceNotFoundException {
++		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceImage");
++		final String defaultImageDir = "org/apache/atlas/odf/images";
++
++		String imgUrl = null;
++		for (DiscoveryServiceProperties info : configManager.getConfigContainer().getRegisteredServices()) {
++			if (info.getId().equals(serviceId)) {
++				imgUrl = info.getIconUrl();
++				break;
++			}
++		}
++
++		ClassLoader cl = this.getClass().getClassLoader();
++		InputStream is = null;
++		if (imgUrl != null) {
++			is = cl.getResourceAsStream("META-INF/odf/" + imgUrl);
++			if (is == null) {
++				is = cl.getResourceAsStream(defaultImageDir + "/" + imgUrl);
++				if (is == null) {
++					try {
++						is = new URL(imgUrl).openStream();
++					} catch (MalformedURLException e) {
++						logger.log(Level.WARNING, "The specified image URL {0} for service {1} is invalid.", new String[] { imgUrl, serviceId });
++					} catch (IOException e) {
++						logger.log(Level.WARNING, "The specified image URL {0} for service {1} could not be accessed.", new String[] { imgUrl, serviceId });
++					}
++				}
++			}
++		}
++		if (imgUrl == null || is == null) {
++			//TODO is this correct? maybe we should use a single default image instead of a random one
++			try {
++				is = cl.getResourceAsStream(defaultImageDir);
++				if (is != null) {
++					InputStreamReader r = new InputStreamReader(is);
++					BufferedReader br = new BufferedReader(r);
++					List<String> images = new ArrayList<>();
++					String line = null;
++					while ((line = br.readLine()) != null) {
++						images.add(line);
++					}
++					// return a deterministic pseudo-random image; mask the hash code to avoid a
++					// negative index (Math.abs(Integer.MIN_VALUE) is negative) and guard against
++					// an empty directory listing
++					if (images.isEmpty()) {
++						is = null;
++					} else {
++						int ix = (serviceId.hashCode() & Integer.MAX_VALUE) % images.size();
++						is = cl.getResourceAsStream(defaultImageDir + "/" + images.get(ix));
++					}
++				}
++			} catch (IOException exc) {
++				logger.log(Level.WARNING, "Exception occurred while retrieving random image, ignoring it", exc);
++				is = null;
++			}
++		}
++		return is;
++	};
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java
+new file mode 100755
+index 0000000..6be0e5a
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java
+@@ -0,0 +1,83 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.discoveryservice;
++
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.LinkedHashMap;
++import java.util.List;
++import java.util.Map;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
++
++public class DiscoveryServiceStatistics {
++
++	private List<AnalysisRequestTracker> requests = new ArrayList<AnalysisRequestTracker>();
++
++	public DiscoveryServiceStatistics(List<AnalysisRequestTracker> requests) {
++		this.requests = requests;
++	}
++
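++	/**
++	 * Aggregates the trackers into one status-count map per discovery service. All
++	 * requests that precede the current one within a tracker are counted as FINISHED
++	 * because services are executed strictly sequentially.
++	 */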
++	public List<ServiceStatusCount> getStatusCountPerService() {
++		List<ServiceStatusCount> result = new ArrayList<ServiceStatusCount>();
++
++		Map<String, LinkedHashMap<STATUS, Integer>> statusMap = new HashMap<String, LinkedHashMap<STATUS, Integer>>();
++
++		for (AnalysisRequestTracker tracker : requests) {
++			int maxDiscoveryServiceRequest = (tracker.getNextDiscoveryServiceRequest() == 0 ? 1 : tracker.getNextDiscoveryServiceRequest());
++			for (int no = 0; no < maxDiscoveryServiceRequest; no++) {
++				STATUS cntStatus = tracker.getStatus();
++
++				// No parallel requests are possible at the moment -> all requests preceding the current one must be finished
++				if (no < maxDiscoveryServiceRequest - 1) {
++					cntStatus = STATUS.FINISHED;
++				}
++
++				DiscoveryServiceRequest req = tracker.getDiscoveryServiceRequests().get(no);
++				LinkedHashMap<STATUS, Integer> cntMap = statusMap.get(req.getDiscoveryServiceId());
++				if (cntMap == null) {
++					cntMap = new LinkedHashMap<STATUS, Integer>();
++					//add 0 default values
++					for (STATUS status : STATUS.values()) {
++						cntMap.put(status, 0);
++					}
++				}
++				Integer val = cntMap.get(cntStatus);
++				val++;
++				cntMap.put(cntStatus, val);
++				statusMap.put(req.getDiscoveryServiceId(), cntMap);
++			}
++		}
++
++		// fetch the registered services once instead of once per service ID
++		List<DiscoveryServiceProperties> registeredServices = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties();
++		for (String key : statusMap.keySet()) {
++			ServiceStatusCount cnt = new ServiceStatusCount();
++			cnt.setId(key);
++			for (DiscoveryServiceProperties info : registeredServices) {
++				if (info.getId().equals(key)) {
++					cnt.setName(info.getName());
++					break;
++				}
++			}
++			cnt.setStatusCountMap(statusMap.get(key));
++			result.add(cnt);
++		}
++
++		return result;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java
+new file mode 100755
+index 0000000..d09297a
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java
+@@ -0,0 +1,221 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.engine;
++
++import java.io.InputStream;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.Date;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.engine.EngineManager;
++import org.apache.atlas.odf.api.engine.MessagingStatus;
++import org.apache.atlas.odf.api.engine.ODFEngineOptions;
++import org.apache.atlas.odf.api.engine.ODFStatus;
++import org.apache.atlas.odf.api.engine.ODFVersion;
++import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
++import org.apache.atlas.odf.api.engine.SystemHealth;
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.core.ODFInitializer;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.ODFUtils;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++
++/**
++ * External Java API for managing and controlling the ODF engine
++ */
++public class EngineManagerImpl implements EngineManager {
++
++	private Logger logger = Logger.getLogger(EngineManagerImpl.class.getName());
++
++	public EngineManagerImpl() {
++	}
++
++	/**
++	 * Checks the health status of ODF
++	 *
++	 * @return Health status of the ODF engine
++	 */
++	public SystemHealth checkHealthStatus() {
++		SystemHealth health = new SystemHealth();
++		try {
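++			// The health check submits a synthetic analysis request for the built-in
++			// health-test discovery service and polls its status until it completes
++			// or the polling limit is reached.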
++			AnalysisRequest dummyRequest = new AnalysisRequest();
++			String dataSetID = ControlCenter.HEALTH_TEST_DATA_SET_ID_PREFIX + UUID.randomUUID().toString();
++			MetaDataObjectReference dataSetRef = new MetaDataObjectReference();
++			dataSetRef.setId(dataSetID);
++			dummyRequest.setDataSets(Collections.singletonList(dataSetRef));
++			List<String> discoveryServiceSequence = new ArrayList<String>();
++			discoveryServiceSequence.add(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
++			dummyRequest.setDiscoveryServiceSequence(discoveryServiceSequence);
++
++			AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++			AnalysisResponse resp = analysisManager.runAnalysis(dummyRequest);
++			String reqId = resp.getId();
++			AnalysisRequestStatus status = null;
++			final int maxNumberOfTimesToPoll = 500;
++			int count = 0;
++			int msToSleepBetweenPolls = 20;
++			boolean continuePolling = false;
++			do {
++				status = analysisManager.getAnalysisRequestStatus(reqId);
++				continuePolling = (status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.NOT_FOUND) && count < maxNumberOfTimesToPoll;
++				if (continuePolling) {
++					count++;
++					Thread.sleep(msToSleepBetweenPolls);
++				}
++			} while (continuePolling);
++			logger.log(Level.INFO, "Health check request ''{3}'' has status ''{0}'', time spent: {2}ms details ''{1}''", new Object[] { status.getState(), status.getDetails(),
++					count * msToSleepBetweenPolls, reqId });
++			health.getMessages().add(MessageFormat.format("Details message: {0}", status.getDetails()));
++			if (count >= maxNumberOfTimesToPoll) {
++				health.setStatus(SystemHealth.HealthStatus.WARNING);
++				String msg = MessageFormat.format("Health test request could not be processed in time ({0}ms)", (maxNumberOfTimesToPoll * msToSleepBetweenPolls));
++				logger.log(Level.INFO, msg);
++				health.getMessages().add(msg);
++			} else {
++				switch (status.getState()) {
++				case NOT_FOUND:
++					health.setStatus(SystemHealth.HealthStatus.ERROR);
++					health.getMessages().add(MessageFormat.format("Request ID ''{0}'' got lost", reqId));
++					break;
++				case ERROR:
++					health.setStatus(SystemHealth.HealthStatus.ERROR);
++					break;
++				case FINISHED:
++					health.setStatus(SystemHealth.HealthStatus.OK);
++					break;
++				default:
++					health.setStatus(SystemHealth.HealthStatus.ERROR);
++				}
++			}
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An unknown error occurred", exc);
++			health.setStatus(SystemHealth.HealthStatus.ERROR);
++			health.getMessages().add(Utils.getExceptionAsString(exc));
++		}
++		return health;
++	}
++
++	/**
++	 * Returns the status of the ODF thread manager
++	 *
++	 * @return Status of all threads making up the ODF thread manager
++	 */
++	public List<ThreadStatus> getThreadManagerStatus() {
++		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
++		return tm.getThreadManagerStatus();
++	}
++
++	/**
++	 * Returns the status of the ODF messaging subsystem
++	 *
++	 * @return Status of the ODF messaging subsystem
++	 */
++	public MessagingStatus getMessagingStatus() {
++		return new ODFInternalFactory().create(DiscoveryServiceQueueManager.class).getMessagingStatus();
++	}
++
++	/**
++	 * Returns the status of the messaging subsystem and the internal thread manager
++	 *
++	 * @return Combined status of the messaging subsystem and the internal thread manager
++	 */
++	public ODFStatus getStatus() {
++		ODFStatus status = new ODFStatus();
++		status.setMessagingStatus(this.getMessagingStatus());
++		status.setThreadManagerStatus(this.getThreadManagerStatus());
++		return status;
++	}
++
++	/**
++	 * Returns the current ODF version
++	 *
++	 * @return ODF version identifier
++	 */
++	public ODFVersion getVersion() {
++		InputStream is = ODFUtils.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/odfversion.txt");
++		ODFVersion version = new ODFVersion();
++		if (is == null) {
++			version.setVersion("NOTFOUND");
++		} else {
++			version.setVersion(Utils.getInputStreamAsString(is, "UTF-8").trim());
++		}
++		return version;
++	}
++
++	/**
++	 * Shuts down the ODF engine, purges all scheduled analysis requests from the queues, and cancels all running analysis requests.
++	 * This means that all running jobs will be cancelled or their results will not be reported back.
++	 * (for debugging purposes only)
++	 *
++	 * @param options Option for immediately restarting the engine after shutdown (default is not to restart immediately but only when needed)
++	 */
++	public void shutdown(ODFEngineOptions options) {
++		long currentTime = System.currentTimeMillis();
++
++		ControlCenter controlCenter = new ODFInternalFactory().create(ControlCenter.class);
++		AdminMessage shutDownMessage = new AdminMessage();
++		Type t = Type.SHUTDOWN;
++		if (options.isRestart()) {
++			t = Type.RESTART;
++		}
++		shutDownMessage.setAdminMessageType(t);
++		String detailMsg = MessageFormat.format("Shutdown was requested on {0} via ODF API", new Object[] { new Date() });
++		shutDownMessage.setDetails(detailMsg);
++		logger.log(Level.INFO, detailMsg);
++		controlCenter.getQueueManager().enqueueInAdminQueue(shutDownMessage);
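++		// Poll until the engine records a stop timestamp newer than the time of this
++		// request, or give up after maxPolls attempts.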
++		int maxPolls = 60;
++		int counter = 0;
++		int timeBetweenPollsMs = 1000;
++		while (counter < maxPolls && ODFInitializer.getLastStopTimestamp() <= currentTime) {
++			try {
++				Thread.sleep(timeBetweenPollsMs);
++			} catch (InterruptedException e) {
++				// restore the interrupt status and stop waiting instead of just printing the stack trace
++				Thread.currentThread().interrupt();
++				break;
++			}
++			counter++;
++		}
++		long timeWaited = ((counter * timeBetweenPollsMs) / 1000);
++		logger.log(Level.INFO, "Waited for {0} seconds for shutdown", timeWaited);
++		if (counter >= maxPolls) {
++			logger.log(Level.WARNING, "Waited for shutdown too long. Continuing.");
++		} else {
++			logger.log(Level.INFO, "Shutdown issued successfully");
++		}
++	}
++
++	@Override
++	public ServiceRuntimesInfo getRuntimesInfo() {
++		return ServiceRuntimes.getRuntimesInfo(ServiceRuntimes.getAllRuntimes());
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java
+new file mode 100755
+index 0000000..9177556
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java
+@@ -0,0 +1,53 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging;
++
++/**
++ * Default encryption: no encryption.
++ */
++public class DefaultMessageEncryption implements MessageEncryption {
++	
++	@Override
++	public String encrypt(String message) {
++		return message;
++	}
++
++	@Override
++	public String decrypt(String message) {
++		return message;
++	}
++
++
++	/*
++	// this used to be our default encryption. Leaving it in here for reference.
++	@Override
++	public String encrypt(String message) {
++		try {
++			return DatatypeConverter.printBase64Binary(message.getBytes("UTF-8"));
++		} catch (UnsupportedEncodingException e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	@Override
++	public String decrypt(String message)  {
++		try {
++			return new String(DatatypeConverter.parseBase64Binary(message), "UTF-8");
++		} catch (UnsupportedEncodingException e) {
++			throw new RuntimeException(e);
++		}
++	}
++	*/
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java
+new file mode 100755
+index 0000000..d2d84dd
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging;
++
++import java.util.concurrent.TimeoutException;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.engine.MessagingStatus;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage;
++import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
++
++public interface DiscoveryServiceQueueManager {
++	
++	void start() throws TimeoutException;
++	
++	void stop() throws TimeoutException;
++		
++	// find the next queue where this tracker should go and put it there
++	void enqueue(AnalysisRequestTracker tracker);
++	
++	void enqueueInStatusQueue(StatusQueueEntry sqe);
++	
++	void enqueueInAdminQueue(AdminMessage message);
++	
++	MessagingStatus getMessagingStatus();
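++
++	// Illustrative call sequence (assumption; "tracker" is a hypothetical AnalysisRequestTracker):
++	//   DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
++	//   qm.start();
++	//   qm.enqueue(tracker); // routes the tracker to the queue of the next service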
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java
+new file mode 100755
+index 0000000..ad1bf28
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java
+@@ -0,0 +1,20 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging;
++
++public interface MessageEncryption {
++	String encrypt(String message);
++
++	String decrypt(String message);
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java
+new file mode 100755
+index 0000000..c71ba3c
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java
+@@ -0,0 +1,381 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.HashMap;
++import java.util.LinkedHashMap;
++import java.util.LinkedList;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.metadata.DefaultMetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ * In-memory implementation of the MetadataStore interface, to be used for testing as
++ * well as for single-node ODF deployments. Uses static HashMaps for storing the
++ * metadata types and objects.
++ */
++public class DefaultMetadataStore extends WritableMetadataStoreBase implements WritableMetadataStore {
++	private Logger logger = Logger.getLogger(DefaultMetadataStore.class.getName());
++
++	private static final String METADATA_STORE_ID = "ODF_LOCAL_METADATA_STORE";
++	private static final String STORE_PROPERTY_TYPE = "default";
++	private static final String STORE_PROPERTY_DESCRIPTION = "ODF local metadata store";
++
++	private static HashMap<String, String> typeStore;
++	private static HashMap<String, StoredMetaDataObject> objectStore;
++	protected LinkedHashMap<String, StoredMetaDataObject> stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
++	private static boolean isInitialized = false;
++	protected static Object accessLock = new Object();
++	static Object initializationLock = new Object();
++
++	public DefaultMetadataStore() {
++		synchronized (initializationLock) {
++			if (!isInitialized) {
++				isInitialized = true;
++				this.resetAllData();
++			}
++		}
++	}
++
++	protected WritableMetadataStore getMetadataStore() {
++		return this;
++	}
++
++	protected Object getAccessLock() {
++		return accessLock;
++	}
++
++	protected HashMap<String, StoredMetaDataObject> getObjects() {
++		return objectStore;
++	}
++
++	protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects() {
++		return stagedObjects;
++	}
++
++	@Override
++	public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
++		synchronized (accessLock) {
++			return WritableMetadataStoreUtils.getConnectionInfo(this, informationAsset);
++		}
++	};
++
++	@Override
++	public void resetAllData() {
++		logger.log(Level.INFO, "Resetting all data in metadata store.");
++		synchronized (accessLock) {
++			typeStore = new HashMap<String, String>();
++			objectStore = new HashMap<String, StoredMetaDataObject>();
++			createTypes(WritableMetadataStoreUtils.getBaseTypes());
++		}
++	}
++
++	@Override
++	public Properties getProperties() {
++		Properties props = new Properties();
++		props.put(MetadataStore.STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_DESCRIPTION);
++		props.put(MetadataStore.STORE_PROPERTY_TYPE, STORE_PROPERTY_TYPE);
++		props.put(STORE_PROPERTY_ID, METADATA_STORE_ID);
++		return props;
++	}
++
++	@Override
++	public String getRepositoryId() {
++		return METADATA_STORE_ID;
++	}
++
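++	// Query format handled by search() below (sketch; the exact keywords are defined
++	// by DefaultMetadataQueryBuilder):
++	//   <dataset identifier> <type name> [where <attribute> = '<value>' [and ...]]
++	// e.g. a condition clause such as: where name = 'BankClientsShort'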
++	@Override
++	public List<MetaDataObjectReference> search(String query) {
++		if ((query == null) || query.isEmpty()) {
++			throw new MetadataStoreException("The search term cannot be null or empty.");
++		}
++		logger.log(Level.INFO, MessageFormat.format("Processing query \"{0}\".", query));
++		synchronized (accessLock) {
++			LinkedList<String> queryElements = new LinkedList<String>();
++			for (String el : query.split(DefaultMetadataQueryBuilder.SEPARATOR_STRING)) {
++				queryElements.add(el);
++			}
++			List<MetaDataObjectReference> result = new ArrayList<MetaDataObjectReference>();
++			String firstOperator = queryElements.removeFirst();
++
++			if (firstOperator.equals(DefaultMetadataQueryBuilder.DATASET_IDENTIFIER)) {
++				String requestedObjectType = queryElements.removeFirst();
++				for (StoredMetaDataObject currentInternalObject : getObjects().values()) {
++					MetaDataObject currentObject = currentInternalObject.getMetaDataObject();
++					String currentObjectType = getObjectType(currentObject);
++					try {
++						if (isSubTypeOf(requestedObjectType, currentObjectType)
++								&& isConditionMet(currentObject, queryElements)) {
++							result.add(currentObject.getReference());
++						}
++					} catch (IllegalArgumentException | IllegalAccessException e) {
++						throw new MetadataStoreException(
++								MessageFormat.format("Error processing \"{0}\" clause of query.",
++										DefaultMetadataQueryBuilder.DATASET_IDENTIFIER));
++					}
++				}
++				return result;
++			} else {
++				throw new MetadataStoreException(MessageFormat.format("Query ''{0}'' is not valid.", query));
++			}
++		}
++	}
++
++	@Override
++	public void createSampleData() {
++		logger.log(Level.INFO, "Creating sample data in metadata store.");
++		SampleDataHelper.copySampleFiles();
++		WritableMetadataStoreUtils.createSampleDataObjects(this);
++	}
++
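++	/**
++	 * Returns an {@link AnnotationPropagator} that copies all annotations of the given
++	 * request from the annotation store into this metadata store.
++	 */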
++	@Override
++	public AnnotationPropagator getAnnotationPropagator() {
++		return new AnnotationPropagator() {
++
++			@Override
++			public void propagateAnnotations(AnnotationStore as, String requestId) {
++				List<Annotation> annotations = as.getAnnotations(null, requestId);
++				for (Annotation annot : annotations) {
++					ensureAnnotationTypeExists(annot);
++					annot.setReference(null); // Set reference to null because a new reference will be generated by the metadata store
++					getMetadataStore().createObject(annot);
++					commit();
++				}
++			}
++		};
++	}
++
++	/**
++	 * Internal helper that creates the given list of types in the metadata store.
++	 *
++	 * @param typeList List of types to be created
++	 */
++	private void createTypes(List<Class<?>> typeList) {
++		synchronized (accessLock) {
++			for (Class<?> type : typeList) {
++				if (!typeStore.containsKey(type.getSimpleName())) {
++					logger.log(Level.INFO,
++							MessageFormat.format("Creating new type \"{0}\" in metadata store.", type.getSimpleName()));
++					typeStore.put(type.getSimpleName(), type.getSuperclass().getSimpleName());
++				} else {
++					throw new MetadataStoreException(MessageFormat.format(
++							"A type with the name \"{0}\" already exists in this metadata store.", type.getName()));
++				}
++			}
++		}
++	}
++
++	/**
++	 * Internal helper that returns the type name of a given metadata object.
++	 *
++	 * @param mdo Metadata object
++	 * @return Type name 
++	 */
++	protected String getObjectType(MetaDataObject mdo) {
++		if (mdo instanceof Annotation) {
++			// Important when using the MetadataStore as an AnnotationStore
++			return ((Annotation) mdo).getAnnotationType();
++		} else {
++			return mdo.getClass().getSimpleName();
++		}
++	}
++
++	/**
++	 * Internal helper that checks if a type is the same as, or a sub type of, another type.
++	 *
++	 * @param parentTypeName Name of the type that is supposed to be the parent type
++	 * @param typeNameToCheck Name of the type to be checked against the parent type
++	 */
++	private boolean isSubTypeOf(String parentTypeName, String typeNameToCheck) {
++		if (typeNameToCheck.equals(parentTypeName)) {
++			return true;
++		}
++		String parent = typeStore.get(typeNameToCheck);
++		if ((parent != null) && (!parent.equals(typeNameToCheck))) {
++			if (isSubTypeOf(parentTypeName, parent)) {
++				return true;
++			}
++		}
++		return false;
++	}
++
++	/**
++	 * Internal helper that checks if the attributes of a given metadata object meet a given condition. 
++	 *
++	 * @param mdo Metadata object
++	 * @param condition List of tokens that make up the condition phrase
++	 */
++	private boolean isConditionMet(MetaDataObject mdo, LinkedList<String> condition)
++			throws IllegalArgumentException, IllegalAccessException {
++		if (condition.isEmpty()) {
++			return true;
++		}
++		LinkedList<String> clonedCondition = new LinkedList<String>();
++		clonedCondition.addAll(condition);
++		try {
++			JSONObject mdoJson = JSONUtils.toJSONObject(mdo);
++			logger.log(Level.FINER, MessageFormat.format("Evaluating object \"{0}\".", mdoJson));
++			while (clonedCondition.size() >= 4) {
++				// Each condition clause consists of four elements, e.g. "where
++				// name = 'BankClientsShort'" or "and name = 'BankClientsShort'"
++				String operator = clonedCondition.removeFirst();
++				String attribute = clonedCondition.removeFirst();
++				String comparator = clonedCondition.removeFirst();
++				String expectedValueWithQuotes = clonedCondition.removeFirst();
++				while ((!expectedValueWithQuotes.endsWith(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) && (clonedCondition.size() != 0)) {
++					expectedValueWithQuotes = expectedValueWithQuotes + DefaultMetadataQueryBuilder.SEPARATOR_STRING + clonedCondition.removeFirst();
++				}
++				if (operator.equals(DefaultMetadataQueryBuilder.CONDITION_PREFIX)
++						|| operator.equals(DefaultMetadataQueryBuilder.AND_IDENTIFIER)) {
++					if (mdoJson.containsKey(attribute)) {
++						Object attributeValue = mdoJson.get(attribute);
++						String actualValue = (attributeValue != null) ? attributeValue.toString() : null;
++						if (comparator.equals(DefaultMetadataQueryBuilder.EQUALS_IDENTIFIER)) {
++							if (!expectedValueWithQuotes.equals(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER + actualValue + DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) {
++								// Condition is not met
++								return false;
++							}
++						} else if (comparator.equals(DefaultMetadataQueryBuilder.NOT_EQUALS_IDENTIFIER)) {
++							if (expectedValueWithQuotes.equals(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER + actualValue + DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) {
++								// Condition is not met
++								return false;
++							}
++						} else {
++							throw new MetadataStoreException(
++									MessageFormat.format("Unknown comparator \"{0}\" in query condition \"{1}\".",
++											new Object[] { comparator, condition.toString() }));
++						}
++					} else {
++						logger.log(Level.INFO,
++								MessageFormat.format("The object does not contain attribute \"{0}\".", attribute));
++						// Condition is not met
++						return false;
++					}
++				} else {
++					throw new MetadataStoreException(
++							MessageFormat.format("Syntax error in query condition \"{0}\".", condition.toString()));
++				}
++			}
++			if (clonedCondition.size() != 0) {
++				throw new MetadataStoreException(
++						MessageFormat.format("Error parsing trailing query elements \"{0}\".", clonedCondition));
++			}
++			// All conditions are met
++			return true;
++		} catch (JSONException e) {
++			throw new MetadataStoreException(MessageFormat.format("Error parsing JSON object {0} in query.", mdo), e);
++		}
++	}
++
++	/**
++	 * Internal helper that merges the references of a staged metadata object with the references of the current metadata object
++	 * stored in the metadata store. The missing references are added to the provided object in place.
++	 *
++	 * @param object Internal representation of a staged metadata object
++	 */
++	private void mergeReferenceMap(StoredMetaDataObject object) {
++		HashMap<String, List<MetaDataObjectReference>> mergedObjectRefMap = new HashMap<String, List<MetaDataObjectReference>>();
++		String objectId = object.getMetaDataObject().getReference().getId();
++		if (getObjects().get(objectId) != null) {
++			// Only merge if the object already exists in the metadata store
++			HashMap<String, List<MetaDataObjectReference>> originalRefMap = getObjects().get(objectId)
++					.getReferenceMap(); // Get reference map of existing object
++			HashMap<String, List<MetaDataObjectReference>> updatedObjectRefMap = object.getReferenceMap();
++			for (String referenceId : updatedObjectRefMap.keySet()) {
++				// Update original reference map in place
++				mergedObjectRefMap.put(referenceId,
++						InternalMetaDataUtils.mergeReferenceLists(originalRefMap.get(referenceId), updatedObjectRefMap.get(referenceId)));
++			}
++			object.setReferencesMap(mergedObjectRefMap);
++		}
++	}
++
++	@Override
++	public void commit() {
++		synchronized (accessLock) {
++			// Check if all required types exist BEFORE starting to create the
++			// objects in order to avoid partial creation of objects
++			for (Map.Entry<String, StoredMetaDataObject> mapEntry : this.stagedObjects.entrySet()) {
++				String typeName = getObjectType(mapEntry.getValue().getMetaDataObject());
++				if ((typeName == null) || !typeStore.containsKey(typeName)) {
++					throw new MetadataStoreException(MessageFormat.format(
++							"The type \"{0}\" of the object you are trying to create does not exist in this metadata store.",
++							typeName));
++				}
++			}
++
++			// Move objects from staging area into metadata store
++			for (Map.Entry<String, StoredMetaDataObject> mapEntry : this.stagedObjects.entrySet()) {
++				StoredMetaDataObject object = mapEntry.getValue();
++				String typeName = getObjectType(mapEntry.getValue().getMetaDataObject());
++				logger.log(Level.INFO,
++						MessageFormat.format(
++								"Creating or updating object with id ''{0}'' and type ''{1}'' in metadata store.",
++								new Object[] { object.getMetaDataObject().getReference(), typeName }));
++				String objectId = object.getMetaDataObject().getReference().getId();
++				mergeReferenceMap(object); // Merge new object references with
++											// existing object references in
++											// metadata store
++				getObjects().put(objectId, object);
++			}
++
++			// Clear staging area
++			stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
++		}
++	}
++
++	/**
++	 * Internal helper that creates a new annotation type in the internal type store if it does not yet exist.
++	 *
++	 * @param annotation Annotation whose type is to be created if it does not yet exist
++	 */
++	private void ensureAnnotationTypeExists(Annotation annotation) {
++		String annotationType = annotation.getAnnotationType();
++		if (typeStore.get(annotationType) == null) {
++			if (annotation instanceof ProfilingAnnotation) {
++				typeStore.put(annotationType, "ProfilingAnnotation");
++			} else if (annotation instanceof ClassificationAnnotation) {
++				typeStore.put(annotationType, "ClassificationAnnotation");
++			} else if (annotation instanceof RelationshipAnnotation) {
++				typeStore.put(annotationType, "RelationshipAnnotation");
++			}
++		}
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java
+new file mode 100755
+index 0000000..4bccd6c
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java
+@@ -0,0 +1,181 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import java.sql.Connection;
++import java.sql.DatabaseMetaData;
++import java.sql.DriverManager;
++import java.sql.ResultSet;
++import java.sql.SQLException;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Date;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++import java.util.Set;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
++import org.apache.atlas.odf.api.metadata.importer.MetadataImportException;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++
++public class JDBCMetadataImporterImpl implements JDBCMetadataImporter {
++	Logger logger = Logger.getLogger(JDBCMetadataImporterImpl.class.getName());
++	private WritableMetadataStore mds;
++
++	public JDBCMetadataImporterImpl() {
++		MetadataStore currentMds = new ODFFactory().create().getMetadataStore();
++		if (currentMds instanceof WritableMetadataStore) {
++			this.mds = (WritableMetadataStore) currentMds;
++		} else {
++			String errorText = "Cannot import data because metadata store ''{0}'' does not support the WritableMetadataStore interface.";
++			throw new RuntimeException(MessageFormat.format(errorText, currentMds.getClass()));
++		}
++	}
++
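++	/**
++	 * Imports database, schema, table, and column metadata for the matching tables via JDBC.
++	 * A minimal usage sketch (hypothetical connection values; "%" is the JDBC wildcard pattern):
++	 * <pre>
++	 * JDBCConnection conn = new JDBCConnection();
++	 * conn.setJdbcConnectionString("jdbc:derby:memory:sampledb");
++	 * conn.setUser("user");
++	 * conn.setPassword("password");
++	 * JDBCMetadataImportResult result = new JDBCMetadataImporterImpl().importTables(conn, "sampledb", "%", "%");
++	 * </pre>
++	 */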
++	@Override
++	public JDBCMetadataImportResult importTables(JDBCConnection connection, String dbName, String schemaPattern, String tableNamePattern)  {
++		Connection conn = null;
++		try {
++			logger.log(Level.FINE, "Importing tables...");
++			conn = DriverManager.getConnection(connection.getJdbcConnectionString(), connection.getUser(), connection.getPassword());
++			DatabaseMetaData dmd = conn.getMetaData();
++			List<MetaDataObjectReference> matchingDatabases = mds.search(mds.newQueryBuilder().objectType("Database").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, dbName).build());
++			Database odfDatabase = null;
++			if (!matchingDatabases.isEmpty()) {
++				odfDatabase = (Database) mds.retrieve(matchingDatabases.get(0));
++				mds.updateObject(odfDatabase);
++			} else {
++				odfDatabase = new Database();
++				List<MetaDataObjectReference> conList = new ArrayList<MetaDataObjectReference>();
++				odfDatabase.setConnections(conList);
++				odfDatabase.setName(dbName);
++				odfDatabase.setDbType(dmd.getDatabaseProductName());
++				odfDatabase.setDescription("Database " + dbName + " imported by JDBC2AtlasImporter on " + new Date());
++				mds.createObject(odfDatabase);
++			}
++			Map<String, Table> tableMap = new HashMap<String, Table>();
++			Map<String, Schema> schemaMap = new HashMap<>();
++			Set<String> tableNames = new HashSet<>();
++			ResultSet columnRS = dmd.getColumns(null, schemaPattern, tableNamePattern, null);
++			while (columnRS.next()) {
++				String columnName = columnRS.getString("COLUMN_NAME");
++				String schemaName = columnRS.getString("TABLE_SCHEM");
++				String tableName = columnRS.getString("TABLE_NAME");
++				String dataType = columnRS.getString("TYPE_NAME");
++				
++				Schema schema = schemaMap.get(schemaName);
++				if (schema == null) {
++					for (Schema s : mds.getSchemas(odfDatabase)) {
++						if (schemaName.equals(s.getName())) {
++							schema = s;
++							mds.updateObject(schema);
++							break;
++						}
++					}
++					if (schema == null) {
++						schema = new Schema();
++						schema.setName(schemaName);
++						mds.createObject(schema);
++					}
++					schemaMap.put(schemaName, schema);
++					mds.addSchemaReference(odfDatabase, schema.getReference());
++				}
++				
++				String key = schemaName + "." + tableName;
++				Table tableObject = tableMap.get(key);
++				if (tableObject == null) {
++					for (Table t : mds.getTables(schema)) {
++						if (tableName.equals(t.getName())) {
++							tableObject = t;
++							mds.updateObject(tableObject);
++							break;
++						}
++					}
++					if (tableObject == null) {
++						tableObject = new Table();
++						tableObject.setName(tableName);
++						MetaDataObjectReference ref = mds.createObject(tableObject);
++						tableObject.setReference(ref);
++					}
++					tableNames.add(tableName);
++					tableMap.put(key, tableObject);
++					mds.addTableReference(schema, tableObject.getReference());
++				}
++				Column column = null;
++				for (Column c : mds.getColumns(tableObject)) {
++					if (columnName.equals(c.getName())) {
++						column = c;
++						break;
++					}
++				}
++				if (column == null) {
++					// Add new column only if a column with the same name does not exist
++					column = WritableMetadataStoreUtils.createColumn(columnName, dataType, null);
++					mds.createObject(column);
++				}
++				mds.addColumnReference(tableObject, column.getReference());
++			}
++			columnRS.close();
++			logger.log(Level.INFO, "Found {0} tables in database ''{1}'': ''{2}''", new Object[]{tableMap.keySet().size(), dbName, tableNames });
++
++			JDBCConnection odfConnection = null;
++			for (MetaDataObject c : mds.getConnections(odfDatabase)) {
++				if ((c instanceof JDBCConnection) && connection.getJdbcConnectionString().equals(((JDBCConnection) c).getJdbcConnectionString())) {
++					odfConnection = (JDBCConnection) c;
++					mds.updateObject(odfConnection);
++					break;
++				}
++			}
++			if (odfConnection == null) {
++				odfConnection = new JDBCConnection();
++				odfConnection.setJdbcConnectionString(connection.getJdbcConnectionString());
++				odfConnection.setUser(connection.getUser());
++				odfConnection.setPassword(connection.getPassword());
++				odfConnection.setDescription("JDBC connection for database " + dbName);
++				mds.createObject(odfConnection);
++			}
++			mds.addConnectionReference(odfDatabase, odfConnection.getReference());
++
++			mds.commit();
++			return new JDBCMetadataImportResult(dbName, odfDatabase.getReference().getId(), new ArrayList<String>( tableMap.keySet() ));
++		} catch (SQLException exc) {
++			throw new MetadataImportException(exc);
++		} finally {
++			if (conn != null) {
++				try {
++					conn.close();
++				} catch (SQLException e) {
++					logger.log(Level.WARNING, "Error closing JDBC connection.", e);
++				}
++			}
++		}
++
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java
+new file mode 100755
+index 0000000..9169d8a
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java
+@@ -0,0 +1,67 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import java.io.FileOutputStream;
++import java.io.IOException;
++import java.io.InputStream;
++import java.text.MessageFormat;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.Utils;
++
++public class SampleDataHelper {
++	private static Logger logger = Logger.getLogger(SampleDataHelper.class.getName());
++	private static final String SAMPLE_DATA_FILE_LIST = "sample-data-toc.properties";
++	private static final String SAMPLE_DATA_FILE_FOLDER = "org/apache/atlas/odf/core/metadata/internal/sampledata/";
++
++	public static void copySampleFiles() {
++		Properties toc = new Properties();
++		ClassLoader cl = SampleDataHelper.class.getClassLoader();
++		try {
++			toc.load(cl.getResourceAsStream(SAMPLE_DATA_FILE_FOLDER + SAMPLE_DATA_FILE_LIST));
++
++			for (String contentFileName : toc.stringPropertyNames()) {
++				logger.log(Level.INFO, "Processing sample file: {0}", contentFileName);
++				String url = copySampleDataFileContents(cl.getResourceAsStream(SAMPLE_DATA_FILE_FOLDER + contentFileName), contentFileName);
++				logger.log(Level.INFO, "Sample data file ''{0}'' copied to {1}", new Object[] { contentFileName, url });
++			}
++		} catch(IOException e) {
++			logger.log(Level.FINE, "An unexpected exception ocurred while connecting to Atlas", e);
++			String messageText = MessageFormat.format("Content file list {0} could not be accessed.", SAMPLE_DATA_FILE_FOLDER + SAMPLE_DATA_FILE_LIST);
++			throw new RuntimeException(messageText, e);
++		}
++		logger.log(Level.INFO, "All sample data files created");
++	}
++
++	private static String copySampleDataFileContents(InputStream is, String contentFile) throws IOException {
++		String url = null;
++		String target = null;
++		String os = System.getProperty("os.name").toLowerCase();
++		if (os.startsWith("windows")) {
++			url = "file://localhost/c:/tmp/" + contentFile;
++			target = "c:/tmp/" + contentFile;
++		} else {
++			url = "file:///tmp/" + contentFile;
++			target = "/tmp/" + contentFile;
++		}
++		String content = Utils.getInputStreamAsString(is, "UTF-8");
++		try (FileOutputStream fos = new FileOutputStream(target)) {
++			fos.write(content.getBytes("UTF-8"));
++		}
++		return url;
++	}
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java
+new file mode 100755
+index 0000000..8cc56d6
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java
+@@ -0,0 +1,111 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++
++/**
++ * Interface to be implemented by metadata stores that support write access, i.e. the creation of new metadata objects,
++ * update of existing metadata objects, and creation of references between metadata objects. The new or updated objects
++ * and references remain in a staging area until they are committed. This is necessary in order to avoid inconsistent
++ * states during comprehensive write operations.  
++ * 
++ *
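++ * <p>
++ * A minimal usage sketch (hypothetical object names; assumes the configured metadata store
++ * implements this interface):
++ * <pre>
++ * WritableMetadataStore mds = ...; // e.g. cast from MetadataStore after an instanceof check
++ * Schema schema = new Schema();
++ * schema.setName("SALES");
++ * mds.createObject(schema);                                // staged only
++ * Table table = new Table();
++ * table.setName("ORDERS");
++ * mds.addTableReference(schema, mds.createObject(table));  // staged reference
++ * mds.commit();                                            // apply all staged changes
++ * </pre>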
++ */
++public interface WritableMetadataStore extends MetadataStore {
++
++	/**
++	 * Add a new metadata object to the staging area of the metadata store.
++	 * If the object already has a reference, the reference id might be changed when committing the new object.  
++	 *
++	 * @param metaDataObject Metadata object
++	 */
++	public MetaDataObjectReference createObject(MetaDataObject metaDataObject);
++
++	/**
++	 * Add an updated metadata object to the staging area of the metadata store. The object reference must point to an
++	 * existing object in the metadata store.  
++	 *
++	 * @param metaDataObject Metadata object
++	 */
++	public void updateObject(MetaDataObject metaDataObject);
++
++	/**
++	 * Apply all staged changes to the metadata store.
++	 *
++	 */
++	public void commit();
++
++	/**
++	 * Add a data file reference to an updated or new data file folder in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param folder Data file folder to add the reference to
++	 * @param reference Reference of the data file to be added to the folder 
++	 */
++	public void addDataFileReference(DataFileFolder folder, MetaDataObjectReference reference);
++
++	/**
++	 * Add a data file folder reference to an updated or new data file folder in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param folder Data file folder to add the reference to
++	 * @param reference Reference of the data file folder to be added to the folder 
++	 */
++	public void addDataFileFolderReference(DataFileFolder folder, MetaDataObjectReference reference);
++
++	/**
++	 * Add a schema reference to an updated or new database in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param database Database to add the reference to
++	 * @param reference Reference of the schema to be added to the database 
++	 */
++	public void addSchemaReference(Database database, MetaDataObjectReference reference);
++
++	/**
++	 * Add a table reference to an updated or new schema in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param schema Schema to add the reference to
++	 * @param reference Reference of the table to be added to the schema 
++	 */
++	public void addTableReference(Schema schema, MetaDataObjectReference reference);
++
++	/**
++	 * Add a column reference to an updated or new relational data set in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param relationalDataSet Relational data set to add the reference to
++	 * @param reference Reference of the column to be added to the relational data set 
++	 */
++	public void addColumnReference(RelationalDataSet relationalDataSet, MetaDataObjectReference reference);
++
++	/**
++	 * Add a connection reference to an updated or new data store in the staging area.
++	 * The new reference will be merged with existing references during the commit operation.
++	 *
++	 * @param dataStore Data store to add the reference to
++	 * @param reference Reference of the connection to be added to the data store 
++	 */
++	public void addConnectionReference(DataStore dataStore, MetaDataObjectReference reference);
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java
+new file mode 100755
+index 0000000..d5f8772
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java
+@@ -0,0 +1,117 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.LinkedHashMap;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.InternalMetadataStoreBase;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++
++/**
++ * Common base for writable metadata stores.
++ * Note that the methods implemented by InternalMetadataStoreBase are not necessarily used by all classes that extend WritableMetadataStoreBase.
++ * (If Java supported multiple inheritance, WritableMetadataStoreBase and InternalMetadataStoreBase would be independent classes.)
++ * 
++ * 
++ */
++public abstract class WritableMetadataStoreBase extends InternalMetadataStoreBase implements WritableMetadataStore {
++	private static Logger logger = Logger.getLogger(WritableMetadataStoreBase.class.getName());
++
++	abstract protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects();
++
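++	/**
++	 * Stage a reference under the given attribute name of an already staged object.
++	 * The target object must have been passed to createObject or updateObject before
++	 * a reference can be added to it.
++	 */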
++	private void addReference(MetaDataObject metaDataObject, String attributeName, MetaDataObjectReference reference) {
++		if (metaDataObject.getReference() == null) {
++			throw new MetadataStoreException("Cannot add a reference because metadata object reference is null.");
++		}
++		StoredMetaDataObject obj = this.getStagedObjects().get(metaDataObject.getReference().getId());
++		if (obj != null) {
++			if (obj.getReferenceMap().get(attributeName) == null) {
++				obj.getReferenceMap().put(attributeName, new ArrayList<MetaDataObjectReference>());
++			}
++			obj.getReferenceMap().get(attributeName).add(reference);
++		} else {
++			String errorMessage = MessageFormat.format("A staged object with id ''{0}'' does not exist. Create or update the object before adding a reference.", metaDataObject.getReference().getId());
++			throw new MetadataStoreException(errorMessage);
++		}
++	}
++
++	@Override
++	public void addDataFileReference(DataFileFolder folder, MetaDataObjectReference reference) {
++		addReference(folder, ODF_DATAFILES_REFERENCE, reference);
++	}
++
++	@Override
++	public void addDataFileFolderReference(DataFileFolder folder, MetaDataObjectReference reference) {
++		addReference(folder, ODF_DATAFILEFOLDERS_REFERENCE, reference);
++	}
++
++	@Override
++	public void addSchemaReference(Database database, MetaDataObjectReference reference) {
++		addReference(database, ODF_SCHEMAS_REFERENCE, reference);
++	}
++
++	@Override
++	public void addTableReference(Schema schema, MetaDataObjectReference reference) {
++		addReference(schema, ODF_TABLES_REFERENCE, reference);
++	}
++
++	@Override
++	public void addColumnReference(RelationalDataSet relationalDataSet, MetaDataObjectReference reference) {
++		addReference(relationalDataSet, ODF_COLUMNS_REFERENCE, reference);
++	}
++
++	@Override
++	public void addConnectionReference(DataStore dataStore, MetaDataObjectReference reference) {
++		addReference(dataStore, ODF_CONNECTIONS_REFERENCE, reference);
++	}
++
++	@Override
++	public MetaDataObjectReference createObject(MetaDataObject metaDataObject) {
++		if (metaDataObject.getReference() == null) {
++			metaDataObject.setReference(WritableMetadataStoreUtils.generateMdoRef(this));
++		}
++		this.getStagedObjects().put(metaDataObject.getReference().getId(), new StoredMetaDataObject(metaDataObject));
++		logger.log(Level.FINE, "Added new new object of type ''{0}'' with id ''{1}'' to staging area.",
++				new Object[] { metaDataObject.getClass().getSimpleName(), metaDataObject.getReference().getId() });
++		return metaDataObject.getReference();
++	}
++
++	@Override
++	public void updateObject(MetaDataObject metaDataObject) {
++		if (metaDataObject.getReference() == null) {
++			throw new MetadataStoreException("Reference attribute cannot be ''null'' when updating a metadata object.");
++		}
++		if (retrieve(metaDataObject.getReference()) == null) {
++			throw new MetadataStoreException(
++					MessageFormat.format("An object wih id ''{0}'' does not extist in this metadata store.",
++							metaDataObject.getReference().getId()));
++		}
++		this.getStagedObjects().put(metaDataObject.getReference().getId(), new StoredMetaDataObject(metaDataObject));
++		logger.log(Level.FINE, "Added updated object of type ''{0}'' with id ''{1}'' to staging area.",
++				new Object[] { metaDataObject.getClass().getSimpleName(), metaDataObject.getReference().getId() });
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java
+new file mode 100755
+index 0000000..808b4d2
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java
+@@ -0,0 +1,297 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.UUID;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.BusinessTerm;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.DataStore;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.Document;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++
++/**
++ * Utilities to be used for implementing the {@link WritableMetadataStore} interface, i.e. for
++ * adding support for an additional writable metadata store to ODF.
++ *
++ *
++ */
++public class WritableMetadataStoreUtils {
++
++	/**
++	 * Utility method for creating and populating a new {@link Column} object. The returned object does not
++	 * yet have a reference; {@link WritableMetadataStore#createObject} generates one when the column is
++	 * added to a metadata store.
++	 *
++	 * @param name Name of the new column
++	 * @param dataType Data type of the new column
++	 * @param description Description of the new column
++	 * @return The resulting column object
++	 */
++	public static Column createColumn(String name, String dataType, String description) {
++		Column column = new Column();
++		column.setName(name);
++		column.setDescription(description);
++		column.setDataType(dataType);
++		return column;
++	}
++
++	public static String getFileUrl(String shortFileName) {
++		if (System.getProperty("os.name").toLowerCase().startsWith("windows")) {
++			return "file://localhost/c:/tmp/" + shortFileName;
++		} else {
++			return "file:///tmp/" + shortFileName;
++		}
++	}
++
++	/**
++	 * Utility method for generating a new metadata object reference that uses a random id and points
++	 * to a given metadata store.
++	 *
++	 * @param mds Metadata store to which the new reference should point
++	 * @return The resulting metadata object reference
++	 */
++	public static MetaDataObjectReference generateMdoRef(MetadataStore mds) {
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId(UUID.randomUUID().toString());
++		ref.setRepositoryId(mds.getRepositoryId());
++		ref.setUrl("");
++		return ref;
++	}
++
++	/**
++	 * Utility method that creates the ODF example objects used for the ODF integration tests
++	 * in a given metadata store and commits them.
++	 *
++	 * @param mds Metadata store in which the example objects are created
++	 */
++	public static void createSampleDataObjects(WritableMetadataStore mds) {
++		DataFile bankClients = new DataFile();
++		bankClients.setName("BankClientsShort");
++		bankClients.setDescription("A reduced sample data file containing bank clients.");
++		bankClients.setUrlString(getFileUrl("bank-clients-short.csv"));
++		mds.createObject(bankClients);
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("CLIENT_ID", "string", "A client ID (column 1)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("NAME", "string", "A client name (column 2)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("ADDRESS", "string", "A client's address (column 3)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("ZIP", "string", "Zip code (column 4)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("AGE", "double", "Age in years (column 5)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("GENDER", "string", "Person gender (column 6)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("MARITAL_STATUS", "string", "Marital status (column 7)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("PROFESSION", "string", "Profession (column 8)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("NBR_YEARS_CLI", "double", "The number of years how long the client has been with us (column 9)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("SAVINGS_ACCOUNT", "string", "Savings account number (column 10)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("ONLINE_ACCESS", "string", "A flag indicating if the client accesses her accounts online (column 11)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("JOINED_ACCOUNTS", "string", "A flag indicating if the client has joined accounts (column 12)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("BANKCARD", "string", "A flag indicating if the client has a bankcard (column 13)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("AVERAGE_BALANCE", "double", "The average balance over the last year (column 14)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("ACCOUNT_ID", "int", "Account Id / number (column 15)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("ACCOUNT_TYPE", "string", "Type of account (column 16)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("EMAIL", "string", "A flag indicating if the client has joined accounts (column 17)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("CCN", "string", "Credit card number (column 18)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("PHONE1", "string", "Primary hone number (column 19)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("PHONE2", "string", "Secondary phone number (column 20)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("CC", "string", "CC indicator (column 21)")));
++		mds.addColumnReference(bankClients, mds.createObject(createColumn("CONTACT", "string", "Contact in case of emergency (column 22)")));
++
++		DataFile simpleExampleTable = new DataFile();
++		simpleExampleTable.setName("SimpleExampleTable");
++		simpleExampleTable.setDescription("A very simple example document referring to a local file.");
++		simpleExampleTable.setUrlString(getFileUrl("simple-example-table.csv"));
++		mds.createObject(simpleExampleTable);
++		mds.addColumnReference(simpleExampleTable, mds.createObject(createColumn("ColumnName1", "string", null)));
++		mds.addColumnReference(simpleExampleTable, mds.createObject(createColumn("ColumnName2", "int", null)));
++
++		Document simpleExampleURLDocument = new Document();
++		simpleExampleURLDocument.setName("Simple URL example document");
++		simpleExampleURLDocument.setDescription("A very simple example document referring to a publicly available URL");
++		simpleExampleURLDocument.setUrlString("https://www.wikipedia.org");
++		simpleExampleURLDocument.setEncoding("ASCII");
++		mds.createObject(simpleExampleURLDocument);
++
++		Document simpleExampleDocument = new Document();
++		simpleExampleDocument.setName("Simple local example document");
++		simpleExampleDocument.setDescription("A very simple example document referring to a local file");
++		simpleExampleDocument.setUrlString(getFileUrl("simple-example-document.txt"));
++		simpleExampleDocument.setEncoding("ASCII");
++		mds.createObject(simpleExampleDocument);
++
++		BusinessTerm bankClientTerm1 = new BusinessTerm();
++		bankClientTerm1.setName("Address");
++		bankClientTerm1.setDescription("The mail address of a person or organization");
++		bankClientTerm1.setAbbreviations(Arrays.asList(new String[] { "Addr" }));
++		bankClientTerm1.setExample("257 Great Lister Street P O BOX 1107 Birmingham");
++		bankClientTerm1.setUsage("Outgoing mail (physical).");
++		mds.createObject(bankClientTerm1);
++
++		BusinessTerm bankClientTerm2a = new BusinessTerm();
++		bankClientTerm2a.setName("Marital Status");
++		bankClientTerm2a.setDescription("The marital status of a person (single, married, divorced, or other).");
++		bankClientTerm2a.setAbbreviations(Arrays.asList(new String[] { "MS","MAST" }));
++		bankClientTerm2a.setExample("single");
++		bankClientTerm2a.setUsage("Contracting");
++		mds.createObject(bankClientTerm2a);
++
++		BusinessTerm bankClientTerm2b = new BusinessTerm();
++		bankClientTerm2b.setReference(generateMdoRef(mds));
++		bankClientTerm2b.setName("Marital Status");
++		bankClientTerm2b.setDescription("2nd term representing the marital status of a person.");
++		bankClientTerm2b.setAbbreviations(Arrays.asList(new String[] { "MS","MAST" }));
++		bankClientTerm2b.setExample("married");
++		bankClientTerm2b.setUsage("Human Resources");
++		mds.createObject(bankClientTerm2b);
++
++		BusinessTerm bankClientTerm3 = new BusinessTerm();
++		bankClientTerm3.setName("AVG Balance");
++		bankClientTerm3.setDescription("The average balance of an account over an amount of time, typically a year. Unit: Dollars.");
++		bankClientTerm3.setAbbreviations(Arrays.asList(new String[] { "AB","AVGB","AVGBAL" }));
++		bankClientTerm3.setExample("1000");
++		bankClientTerm3.setUsage("Contracting");
++		bankClientTerm3.setOriginRef("test-pointer-to-igc");
++		bankClientTerm3.setReplicaRefs(Arrays.asList(new String[] { "first-replica-pointer", "second-replica-pointer" }));
++		mds.createObject(bankClientTerm3);
++
++		BusinessTerm bankClientTerm4 = new BusinessTerm();
++		bankClientTerm4.setName("LASTNAME");
++		bankClientTerm4.setDescription("Last name of a person");
++		bankClientTerm4.setAbbreviations(Arrays.asList(new String[] { "LASTNME" }));
++		bankClientTerm4.setExample("1000");
++		bankClientTerm4.setUsage("Contracting");
++		mds.createObject(bankClientTerm4);
++
++		BusinessTerm bankClientTerm5a = new BusinessTerm();
++		bankClientTerm5a.setReference(generateMdoRef(mds));
++		bankClientTerm5a.setName("Credit Card Number");
++		bankClientTerm5a.setDescription("Credit card number of a customer");
++		bankClientTerm5a.setAbbreviations(Arrays.asList(new String[] { "CreNum", "CCN" }));
++		bankClientTerm5a.setExample("1234567");
++		bankClientTerm5a.setUsage("Contracting");
++		mds.createObject(bankClientTerm5a);
++
++		BusinessTerm bankClientTerm5b = new BusinessTerm();
++		bankClientTerm5b.setReference(generateMdoRef(mds));
++		bankClientTerm5b.setName("Credit Card Number");
++		bankClientTerm5b.setDescription("Credit card number of an employee");
++		bankClientTerm5b.setAbbreviations(Arrays.asList(new String[] {}));      // this one has no abbreviations
++		bankClientTerm5b.setExample("1234567");
++		bankClientTerm5b.setUsage("Human Resources");
++		mds.createObject(bankClientTerm5b);
++
++		BusinessTerm bankClientTermDataSetLevel = new BusinessTerm();
++		bankClientTermDataSetLevel.setName("Bank Clients");
++		bankClientTermDataSetLevel.setDescription("The only purpose of this term is to match the name of the data set BankClientsShort");
++		bankClientTermDataSetLevel.setAbbreviations(Arrays.asList(new String[] { "BC" }));
++		bankClientTermDataSetLevel.setExample("<none>");
++		bankClientTermDataSetLevel.setUsage("Integration testing of TermMatcher discovery service. Yields confidence value of 56.");
++		mds.createObject(bankClientTermDataSetLevel);
++
++		mds.commit();
++	}
++
++	/**
++	 * Utility method that returns the list of ODF base types that need to be supported by a metadata store in order to be used with ODF.
++	 *
++	 * @return List of the ODF base types
++	 */
++	public static final List<Class<?>> getBaseTypes() {
++		List<Class<?>> typeList = new ArrayList<Class<?>>();
++		typeList.add(MetaDataObject.class);
++		typeList.add(DataStore.class);
++		typeList.add(Database.class);
++		typeList.add(Connection.class);
++		typeList.add(JDBCConnection.class);
++		typeList.add(DataSet.class);
++		typeList.add(UnknownDataSet.class);
++		typeList.add(RelationalDataSet.class);
++		typeList.add(Column.class);
++		typeList.add(Table.class);
++		typeList.add(Schema.class);
++		typeList.add(DataFileFolder.class);
++		typeList.add(DataFile.class);
++		typeList.add(Document.class);
++		typeList.add(Annotation.class);
++		typeList.add(ProfilingAnnotation.class);
++		typeList.add(ClassificationAnnotation.class);
++		typeList.add(RelationshipAnnotation.class);
++		typeList.add(BusinessTerm.class);
++		return typeList;
++	}
++
++	/**
++	 * Utility method that returns a connection info object for a given information asset,
++	 * or null if no connection info is available for this type of asset.
++	 *
++	 * @param mds Metadata store containing the information asset
++	 * @param informationAsset Information asset, e.g. a table
++	 * @return Connection info object, or null
++	 */
++	public static ConnectionInfo getConnectionInfo(MetadataStore mds, MetaDataObject informationAsset) {
++		if (informationAsset instanceof Table) {
++			Schema schema = getParentOfType(mds, informationAsset, Schema.class);
++			Database database = getParentOfType(mds, schema, Database.class);
++			JDBCConnectionInfo jdbcConnectionInfo = new JDBCConnectionInfo();
++			jdbcConnectionInfo.setSchemaName(schema.getName());
++			jdbcConnectionInfo.setTableName(informationAsset.getName());
++			jdbcConnectionInfo.setConnections(mds.getConnections(database));
++			jdbcConnectionInfo.setAssetReference(informationAsset.getReference());
++			return jdbcConnectionInfo;
++		}
++		return null;
++	}
++
++	/**
++	 * Utility to return the parent of a metadata object cast to a given type.
++	 * An exception is thrown if the types don't match.
++	 *
++	 * @param mds Metadata store
++	 * @param metaDataObject Metadata object
++	 * @param type Class to which the parent should be casted
++	 * @return Parent object of the given metadata object
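++	 * <p>
++	 * Example, as used by {@link #getConnectionInfo}:
++	 * {@code Database database = getParentOfType(mds, schema, Database.class);}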
++	 */
++	public static <T> T getParentOfType(MetadataStore mds, MetaDataObject metaDataObject, Class<T> type) {
++		MetaDataObject parent = mds.getParent(metaDataObject);
++		if (parent == null) {
++			String errorMessage = MessageFormat.format("Cannot extract connection info for object id ''{0}'' because the parent object is null.", metaDataObject.getReference().getId());
++			throw new MetadataStoreException(errorMessage);
++		}
++		if (!type.isInstance(parent)) {
++			String errorMessage = MessageFormat.format("Parent of object ''{0}'' is expected to be of type ''{1}'' but is ''{2}''",
++					new Object[] { metaDataObject.getReference().getId(), type.getSimpleName(), parent.getClass().getName() });
++			throw new MetadataStoreException(errorMessage);
++		}
++		return type.cast(parent);
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java
+new file mode 100755
+index 0000000..f2f95ff
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java
+@@ -0,0 +1,26 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.notification;
++
++import java.util.ArrayList;
++import java.util.List;
++
++public class DefaultNotificationManager implements NotificationManager {
++
++	@Override
++	public List<NotificationListener> getListeners() {
++		return new ArrayList<>(); 
++	}
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java
+new file mode 100755
+index 0000000..fb6c37a
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java
+@@ -0,0 +1,35 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.notification;
++
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++
++public interface NotificationListener {
++	
++	/**
++	 * A human readable name for this listener. Used for logging and management.
++	 */
++	String getName();
++	
++	/**
++	 * The Kafka topic to listen on.
++	 */
++	String getTopicName();
++
++	/**
++	 * This is called whenever an event arrives. Typically, one would initiate
++	 * some analysis request on the passed odf instance.
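++	 * <p>
++	 * A minimal listener sketch (hypothetical names and topic):
++	 * <pre>
++	 * public class MyListener implements NotificationListener {
++	 *     public String getName() { return "My listener"; }
++	 *     public String getTopicName() { return "my-topic"; }
++	 *     public void onEvent(String event, OpenDiscoveryFramework odf) {
++	 *         // parse the event and, e.g., start an analysis through the passed odf instance
++	 *     }
++	 * }
++	 * </pre>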
++	 */
++	void onEvent(String event, OpenDiscoveryFramework odf);
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java
+new file mode 100755
+index 0000000..ce4d8ff
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java
+@@ -0,0 +1,26 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.notification;
++
++import java.util.List;
++
++/**
++ * Provide implementations in the odf-implementations.properties file(s).
++ *
++ */
++public interface NotificationManager {
++	
++	 List<NotificationListener> getListeners();
++
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java
+new file mode 100755
+index 0000000..6b33cdd
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java
+@@ -0,0 +1,137 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.settings;
++
++import java.util.Map;
++import java.util.Properties;
++
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.configuration.ConfigManager;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.api.settings.KafkaConsumerConfig;
++import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
++import org.apache.atlas.odf.api.settings.MessagingConfiguration;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ * External Java API for reading and updating ODF settings.
++ */
++public class SettingsManagerImpl implements SettingsManager {
++	public static final String HIDDEN_PASSWORD_IDENTIFIER = "***hidden***";
++	private ConfigManager configManager;
++
++	public SettingsManagerImpl() {
++		ODFInternalFactory f = new ODFInternalFactory();
++		configManager = f.create(ConfigManager.class);
++	}
++
++	/**
++	 * Retrieve Kafka consumer properties
++	 * @return Current Kafka consumer properties
++	 */
++	public Properties getKafkaConsumerProperties() {
++		Properties props = new Properties();
++		MessagingConfiguration messagingConfig = getODFSettings().getMessagingConfiguration();
++		if (!(messagingConfig instanceof KafkaMessagingConfiguration)) {
++			return props;
++		}
++		KafkaConsumerConfig config = ((KafkaMessagingConfiguration) messagingConfig).getKafkaConsumerConfig();
++		try {
++			JSONObject configJSON = JSONUtils.toJSONObject(config);
++			for (Object key : configJSON.keySet()) {
++				props.setProperty((String) key, String.valueOf(configJSON.get(key)));
++			}
++		} catch (JSONException e) {
++			throw new RuntimeException("The kafka consumer config could not be parsed!", e);
++		}
++		return props;
++	}
++
++	/**
++	 * Retrieve Kafka producer properties
++	 * @return Current Kafka producer properties
++	 */
++	public Properties getKafkaProducerProperties() {
++		// Currently no producer properties are editable and therefore not
++		// stored in the config file
++		Properties props = new Properties();
++		props.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
++		props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
++		return props;
++	}
++
++	/**
++	 * Retrieve overall ODF settings including plain passwords
++	 * @return Current ODF settings
++	 */
++	public ODFSettings getODFSettings() {
++		return configManager.getConfigContainer().getOdf();
++	}
++
++	/**
++	 * Retrieve overall ODF settings with hidden passwords
++	 * @return Current ODF settings
++	 */
++	public ODFSettings getODFSettingsHidePasswords() {
++		return this.configManager.getConfigContainerHidePasswords().getOdf();
++	}
++
++	/**
++	 * Update ODF settings
++	 * 
++	 * Passwords provided as plain text will be encrypted. If HIDDEN_PASSWORD_IDENTIFIER
++	 * is provided instead of a password, the stored password will remain unchanged.
++	 * 
++	 * @param update Updated ODF settings
++	 */
++	public void updateODFSettings(ODFSettings update) throws ValidationException {
++		ConfigContainer cont = new ConfigContainer();
++		cont.setOdf(update);
++		this.configManager.updateConfigContainer(cont);
++	}
++
++	/**
++	 * Reset ODF settings to the defaults
++	 */
++	public void resetODFSettings() {
++		new ODFInternalFactory().create(ConfigManager.class).resetConfigContainer();
++	}
++
++	/**
++	 * Retrieve user defined ODF properties
++	 * @return Map of user defined ODF properties
++	 */
++	public Map<String, Object> getUserDefinedConfig() {
++		return getODFSettings().getUserDefined();
++	}
++
++	/**
++	 * Update user defined ODF properties
++	 * @param update Map of user defined ODF properties
++	 * @throws ValidationException if the updated settings do not pass validation
++	 */
++	public void updateUserDefined(Map<String, Object> update) throws ValidationException {
++		ODFSettings odfConfig = new ODFSettings();
++		odfConfig.setUserDefined(update);
++		updateODFSettings(odfConfig);
++	}
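++	// Illustrative round trip for user-defined properties ("myCustomFlag" is
++	// a hypothetical key):
++	//   Map<String, Object> userProps = getUserDefinedConfig();
++	//   userProps.put("myCustomFlag", Boolean.TRUE);
++	//   updateUserDefined(userProps);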
++}
+diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java
+new file mode 100755
+index 0000000..5bfae91
+--- /dev/null
++++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java
+@@ -0,0 +1,35 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.store;
++
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++
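++/**
++ * Storage backend for the ODF configuration: implementations persist the
++ * ConfigContainer and track configuration changes that are still pending.
++ */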
++public interface ODFConfigurationStorage {
++
++	public void storeConfig(ConfigContainer container);
++
++	public ConfigContainer getConfig(ConfigContainer defaultConfiguration);
++
++	public void onConfigChange(ConfigContainer container);
++
++	public void addPendingConfigChange(String changeId);
++
++	public void removePendingConfigChange(String changeId);
++
++	public boolean isConfigChangePending(String changeId);
++}
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties
+new file mode 100755
+index 0000000..c9c21d0
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties
+@@ -0,0 +1,35 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# default implementations
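++#
++# Each entry maps an ODF interface to the implementation class that
++# ODFInternalFactory instantiates for it; for example (illustrative):
++#   SettingsManager settings = new ODFInternalFactory().create(SettingsManager.class);
++# returns an org.apache.atlas.odf.core.settings.SettingsManagerImpl.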
++
++AnalysisRequestTrackerStore=org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore
++ThreadManager=org.apache.atlas.odf.core.controlcenter.DefaultThreadManager
++MetadataStore=org.apache.atlas.odf.core.metadata.DefaultMetadataStore
++AnnotationStore=org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore
++JDBCMetadataImporter=org.apache.atlas.odf.core.metadata.JDBCMetadataImporterImpl
++org.apache.atlas.odf.api.connectivity.DataSetRetriever=org.apache.atlas.odf.api.connectivity.DataSetRetrieverImpl
++SparkServiceExecutor=SparkServiceExecutorImpl
++Environment=org.apache.atlas.odf.core.StandaloneEnvironment
++AnalysisManager=org.apache.atlas.odf.core.analysis.AnalysisManagerImpl
++EngineManager=org.apache.atlas.odf.core.engine.EngineManagerImpl
++DiscoveryServiceManager=org.apache.atlas.odf.core.discoveryservice.DiscoveryServiceManagerImpl
++SettingsManager=org.apache.atlas.odf.core.settings.SettingsManagerImpl
++MessageEncryption=org.apache.atlas.odf.core.messaging.DefaultMessageEncryption
++TransactionContextExecutor=org.apache.atlas.odf.core.controlcenter.DefaultTransactionContextExecutor
++NotificationManager=org.apache.atlas.odf.core.notification.DefaultNotificationManager
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json
+new file mode 100755
+index 0000000..0a81029
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json
+@@ -0,0 +1,28 @@
++{
++	"odf" : {
++		"instanceId" : "odf-default-id-CHANGEME",
++		"odfUrl" : "https://localhost:58081/odf-web-1.2.0-SNAPSHOT",
++		"odfUser" : "sdp",
++		"odfPassword" : "ZzTeX3hKtVORgks+2TaLPWxerucPBoxK",
++		"consumeMessageHubEvents" : false,
++		"discoveryServiceWatcherWaitMs": 2000,
++		"reuseRequests": true,
++		"runAnalysisOnImport": false,
++		"runNewServicesOnRegistration": false,
++		"enableAnnotationPropagation": true,
++		"messagingConfiguration": {
++			"type": "com.ibm.iis.odf.api.settings.KafkaMessagingConfiguration",
++			"analysisRequestRetentionMs": 86400000,
++			"kafkaBrokerTopicReplication": 1,
++			"queueConsumerWaitMs": 5000,
++			"kafkaConsumerConfig": {
++				"offsetsStorage": "kafka",
++				"zookeeperSessionTimeoutMs": 400,
++				"zookeeperConnectionTimeoutMs": 6000
++			}
++		},
++		"userDefined": {
++		}
++	},
++	"registeredServices" : []
++}
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv
+new file mode 100755
+index 0000000..5efd809
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv
+@@ -0,0 +1,500 @@
++"CLIENT_ID","NAME","ADDRESS","ZIP","AGE","GENDER","MARITAL_STATUS","PROFESSION","NBR_YEARS_CLI","SAVINGS_ACCOUNT","ONLINE_ACCESS","JOINED_ACCOUNTS","BANKCARD","AVERAGE_BALANCE","ACCOUNT_ID","ACCOUNT_TYPE","EMAIL","CCN","PHONE1","PHONE2","CC","CONTACT"
++"1578","Tyler O Abatemarco","WittonBirmingham","77019-6813",85.0,"M","married","pensioner",7.0,"YES","NO","YES","NO",4987.44,101578,"SAVINGS","tyler_abatemarco@ibm.com",NULL,"605-555-7281","406-555-5219","LR","207-555-2684"
++"1578","Tyler O Abatemarco","WittonBirmingham","77019-6813",85.0,"M","married","pensioner",7.0,"YES","NO","YES","NO",13484.56,201578,"CHECKING","tyler_abatemarco@ibm.com",NULL,"605-555-7281","406-555-5219","LR","207-555-2684"
++"1579","Debra M Pedrick","Kings HeathBirmingham","80110-4998",78.0,"M","widowed","pensioner",10.0,"NO","NO","YES","YES",2070.00,101579,"SAVINGS","debra_pedrick@icloud.com","4146 6643 9004 5458","385-555-9954","512-555-6256","IL",NULL
++"1580","Cassandra R Ker","Kings HeathBirmingham","33781-2153",38.0,"F","married","employee",23.0,"YES","NO","YES","NO",10927.00,101580,"SAVINGS","cassandra.ker@web.de","4146 6643 9004 5458","904-555-4068",NULL,"ML","cassandra_ker@t-online.de"
++"1581","Johnson Y Foltz","351-353 Lea Bridge RoadLeyton London","34787",75.0,"F","widowed","pensioner",16.0,"YES","NO","NO","YES",13961.16,101581,"SAVINGS","foltz@de.ibm.com","213130951856200","334-555-3249",NULL,"AZ","503-555-9423"
++"1581","Johnson Y Foltz","351-353 Lea Bridge RoadLeyton London","34787",75.0,"F","widowed","pensioner",16.0,"YES","NO","NO","YES",37746.84,201581,"CHECKING","foltz@de.ibm.com","213130951856200","334-555-3249",NULL,"AZ","503-555-9423"
++"1582","Mary H Jacques","121 W Canal Stre Unit 53Leeds","00000-0000",62.0,"F","married","inactive",26.0,"NO","NO","YES","NO",46700.00,101582,"SAVINGS","mary_j@ccdef.net","3400-000000-00009","651-555-1612","717-555-3906","VA","307-555-1593"
++"1583","Pual G Fowler","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","19175-5490",59.0,"F","married","inactive",7.0,"NO","NO","YES","NO",58749.00,101583,"SAVINGS","pual_fowler@aol.com","3400-000000-00009","404-555-4055",NULL,"GU","fowler@gmx.net"
++"1584","Thoang D Meyers","Woodilee RoadKirkintilloch Glasgo","78723-6199",86.0,"F","married","inactive",18.0,"YES","NO","NO","NO",1712.61,101584,"SAVINGS","thoang_m@gmx.net","3528-3095-1856-2063","717-555-9363","501-555-9558","FJ","303-555-5512"
++"1584","Thoang D Meyers","Woodilee RoadKirkintilloch Glasgo","78723-6199",86.0,"F","married","inactive",18.0,"YES","NO","NO","NO",4630.39,201584,"CHECKING","thoang_m@gmx.net","3528-3095-1856-2063","717-555-9363","501-555-9558","FJ","303-555-5512"
++"1585","Janet M Alcazar","CLITTAFORD RD SOUTHWAYPLYMOUTH","34741-5027",24.0,"M","single","worker",0.0,"NO","YES","NO","YES",824.00,101585,"SAVINGS","janet_alcazar@gmx.net","3400-000000-00009","785-555-2001","401-555-214","SA","503-555-3229"
++"1586","Richard A Pringle","257 Great Lister Street P O BOX 1107 Birmingham","32114-3851",57.0,"F","married","employee",37.0,"YES","NO","YES","NO",29236.00,101586,"SAVINGS","richardpringle@msl.org","5383908528354962","850-555-8595","505-555-156","CA","360-555-4969"
++"1587","Christina P Gee","EASTLEIGHHAMPSHIRE S050 4EX","86047-2538",66.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",4424.76,101587,"SAVINGS","gee@blue.com","5423111111111111","850-555-3929","317-555-6474","DJ","501-555-9658"
++"1587","Christina P Gee","EASTLEIGHHAMPSHIRE S050 4EX","86047-2538",66.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",11963.24,201587,"CHECKING","gee@blue.com","5423111111111111","850-555-3929","317-555-6474","DJ","501-555-9658"
++"1588","Maurits Q Schuller","2560 MCMULLEN BOOTH RDCLEARWATER","33761-4100",67.0,"F","single","pensioner",25.0,"YES","NO","NO","YES",22249.00,101588,"SAVINGS","mschuller@t-online.de","36111111111111","517-555-8548","202-555-9609","NP","785-555-219"
++"1589","Lillian R Isaac","FleetesexHampshire","33309-3421",72.0,"M","single","pensioner",26.0,"NO","NO","NO","YES",32952.00,101589,"SAVINGS","isaac@icloud.com","6520224090045455","517-555-8413",NULL,"PH","517-555-6352"
++"1590","Lucy V Adler","Blucher StreetBirmingham","27406-6355",62.0,"F","married","inactive",15.0,"NO","NO","YES","NO",3703.05,101590,"SAVINGS","adler@t-online.de","378282246310005","225-555-32",NULL,"HM","501-555-1240"
++"1590","Lucy V Adler","Blucher StreetBirmingham","27406-6355",62.0,"F","married","inactive",15.0,"NO","NO","YES","NO",10011.94,201590,"CHECKING","adler@t-online.de","378282246310005","225-555-32",NULL,"HM","501-555-1240"
++"1591","Cory J Gardner","5 Oxford RoadNewbury","34742-0460",88.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",12870.00,101591,"CHECKING","cory_g@t-online.de",NULL,"808-555-6833",NULL,"NI","860-555-5925"
++"1592","Dixie C Weitzel","Rockingham RoadLeicester","77318-6919",43.0,"M","married","farmer",22.0,"YES","NO","YES","NO",16174.00,101592,"CHECKING","dixie.weitzel@t-online.de","180030951856201","919-555-1628",NULL,"AL",NULL
++"1593","Cleo D Lamkin","58/59 Lower High StreetWest Midlands","86403-6886",58.0,"M","married","worker",31.0,"YES","NO","YES","NO",19956.78,101593,"CHECKING","lamkin@de.ibm.com","5169 7990 9185 4334","904-555-9488",NULL,"SJ","804-555-4083"
++"1593","Cleo D Lamkin","58/59 Lower High StreetWest Midlands","86403-6886",58.0,"M","married","worker",31.0,"YES","NO","YES","NO",53957.22,201593,"SAVINGS","lamkin@de.ibm.com","5169 7990 9185 4334","904-555-9488",NULL,"SJ","804-555-4083"
++"1594","Harm Z Rossman","Tame RoadBirmingham","",61.0,"F","married","inactive",18.0,"NO","NO","NO","NO",4862.00,101594,"CHECKING","harm_rossman@icloud.com","6220264390045758","417-555-4830",NULL,"DO","360-555-9576"
++"1595","Paulline X Daby","HockleyBirmingham","19477",64.0,"F","single","inactive",23.0,"NO","YES","YES","YES",3580.00,101595,"CHECKING","daby@cicn.gov","6011567891012132","775-555-1070",NULL,"AO","802-555-6080"
++"1596","Aaron L Ayala","Elmwood AvenueFeltham","77429-1770",22.0,"M","single","inactive",8.0,"NO","NO","NO","YES",1273.86,101596,"CHECKING","aaron_ayala@yahoo.com","5462522444922689","601-555-5744",NULL,"KP","aaron_ayala@ibm.com"
++"1596","Aaron L Ayala","Elmwood AvenueFeltham","77429-1770",22.0,"M","single","inactive",8.0,"NO","NO","NO","YES",3444.14,201596,"SAVINGS","aaron_ayala@yahoo.com","5462522444922689","601-555-5744",NULL,"KP","aaron_ayala@ibm.com"
++"1597","Roselie S Worley","69 MOLAND STRRETBIRMINGHAM","28204-2120",50.0,"M","married","executives,self-employed",17.0,"NO","NO","NO","YES",13245.00,101597,"CHECKING","roselie_w@ccdef.net","30411111111111","402-555-4602","701-555-4795","SO","360-555-686"
++"1598","Ajaymu R Parghi","AbercraveSwansea","80901",71.0,"F","married","inactive",20.0,"NO","NO","YES","NO",3589.00,101598,"CHECKING","parghi@gmx.net","4024 0071 2159 5481","207-555-5103",NULL,"BN","404-555-4633"
++"1599","Keith Z Flynn","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","85602-7058",45.0,"M","cohabitant","craftsmen, storekeepers",8.0,"NO","NO","YES","NO",1738.53,101599,"CHECKING","kflynn@web.de","5169799091854334","573-555-446","860-555-9280","YE","401-555-1095"
++"1599","Keith Z Flynn","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","85602-7058",45.0,"M","cohabitant","craftsmen, storekeepers",8.0,"NO","NO","YES","NO",4700.47,201599,"SAVINGS","kflynn@web.de","5169799091854334","573-555-446","860-555-9280","YE","401-555-1095"
++"1600","Lanh I Redding","Thurmaston LaneLeicester","30340-1452",24.0,"M","single","inactive",6.0,"NO","YES","NO","YES",24808.00,101600,"CHECKING","lanh.redding@ccdef.net","30111111161229","334-555-9104","860-555-501","TP","lredding@icloud.com"
++"1601","Jeri H Redlinger","173 Friar StreetReading","29208",73.0,"M","married","pensioner",32.0,"YES","NO","YES","NO",24112.00,101601,"CHECKING","jeri_redlinger@aol.com","30210111161229",NULL,"808-555-3531","MQ",NULL
++"1602","Miyim M Arcangel","173 Friar Street 173 Friar Street Reading","99523-2764",59.0,"F","married","inactive",30.0,"YES","NO","YES","NO",15677.55,101602,"CHECKING","miyim.arcangel@web.de","4024 0071 2159 5481","916-555-1548","303-555-3886","CH",NULL
++"1602","Miyim M Arcangel","173 Friar Street 173 Friar Street Reading","99523-2764",59.0,"F","married","inactive",30.0,"YES","NO","YES","NO",42387.45,201602,"SAVINGS","miyim.arcangel@web.de","4024 0071 2159 5481","916-555-1548","303-555-3886","CH",NULL
++"1603","Ofelia G Miyauchi","Chandler`s FordEastleigh","78756-3216",50.0,"M","single","executives,self-employed",21.0,"NO","YES","NO","YES",4648.00,101603,"CHECKING","ofelia_m@msl.org","5285696282092972","904-555-7284",NULL,"IT","617-555-3241"
++"1604","Shelley Z Lawrence","DroitwichWorcester","78028-2709",41.0,"F","married","employee",26.0,"NO","NO","YES","NO",8615.00,101604,"CHECKING","shelley_lawrence@de.ibm.com","3528-3095-1856-2063","919-555-6224","808-555-7506","BY","615-555-5739"
++"1605","Robt R Ewing","STAFFA ROADLONDON","33932",72.0,"M","single","worker",27.0,"NO","NO","NO","NO",10021.59,101605,"SAVINGS","ewing@de.ibm.com","5383908528354962","360-555-6105","401-555-5080","QA","302-555-3375"
++"1605","Robt R Ewing","STAFFA ROADLONDON","33932",72.0,"M","single","worker",27.0,"NO","NO","NO","NO",27095.41,201605,"CHECKING","ewing@de.ibm.com","5383908528354962","360-555-6105","401-555-5080","QA","302-555-3375"
++"1606","Margreta Q Major","TamworthStaffs","33762-4933",62.0,"F","widowed","pensioner",1.0,"NO","NO","NO","YES",28450.00,101606,"SAVINGS","major@icloud.com","30310111161029","401-555-3077","225-555-2687","PH","major@t-online.de"
++"1607","Darryl E Read","NE 22 7 AANORTHUMBERLAND","0",71.0,"F","married","pensioner",20.0,"YES","NO","YES","YES",52266.00,101607,"SAVINGS","darryl_read@cicn.gov","5383908528354962","919-555-5740",NULL,"CZ","804-555-5268"
++"1608","Charles T Archer","Bellbrook ParkUckfield","85339-1777",76.0,"M","married","inactive",10.0,"NO","NO","YES","NO",29859.84,101608,"SAVINGS","charles_a@ibm.com","4024 0071 2159 5481","401-555-6122",NULL,"AQ","717-555-5402"
++"1608","Charles T Archer","Bellbrook ParkUckfield","85339-1777",76.0,"M","married","inactive",10.0,"NO","NO","YES","NO",80732.16,201608,"CHECKING","charles_a@ibm.com","4024 0071 2159 5481","401-555-6122",NULL,"AQ","717-555-5402"
++"1609","Ramona I Holden","IDOTTSVAGEN 7SWEDEN","71134",60.0,"M","married","pensioner",25.0,"NO","NO","NO","YES",7228.00,101609,"CHECKING","ramonaholden@blue.com","4146-6643-9004-5458","614-555-3324","502-555-7822","UG","417-555-688"
++"1610","Omid E Kerns","--------------------","0",20.0,"F","single","inactive",3.0,"NO","NO","NO","YES",1390.00,101610,"CHECKING","kerns@de.ibm.com","3400 000000 00009","518-555-3168","307-555-9628","FK","916-555-9610"
++"1611","Tom M Augustine","Barkby RoadLeicester","31008",44.0,"M","single","craftsmen, storekeepers",14.0,"NO","NO","YES","YES",2654.64,101611,"CHECKING","tom_augustine@yahoo.com","4024-0071-2159-5481","406-555-584",NULL,"BM","tom.augustine@gmx.net"
++"1611","Tom M Augustine","Barkby RoadLeicester","31008",44.0,"M","single","craftsmen, storekeepers",14.0,"NO","NO","YES","YES",7177.36,201611,"SAVINGS","tom_augustine@yahoo.com","4024-0071-2159-5481","406-555-584",NULL,"BM","tom.augustine@gmx.net"
++"1612","Jas S Antunes","Green LaneHeywood","78956-1353",73.0,"M","married","inactive",27.0,"NO","NO","YES","NO",11994.00,101612,"CHECKING","jas.antunes@web.de","30310111161029","804-555-2167","406-555-1196","KZ","615-555-8050"
++"1613","Georganne F Furgason","Forrest RoadMiddx","93267-0942",70.0,"M","married","pensioner",24.0,"NO","NO","YES","NO",22831.00,101613,"CHECKING",NULL,"6220264390045758","515-555-1188","225-555-5673","ST",NULL
++"1614","Elsa Z Hargreaves","GarnantAmmanford","98350",31.0,"M","married","worker",4.0,"NO","YES","YES","NO",28.62,101614,"SAVINGS","ehargreaves@gmail.com",NULL,"808-555-637","803-555-2343","GU","elsa_h@t-online.de"
++"1614","Elsa Z Hargreaves","GarnantAmmanford","98350",31.0,"M","married","worker",4.0,"NO","YES","YES","NO",77.38,201614,"CHECKING","ehargreaves@gmail.com",NULL,"808-555-637","803-555-2343","GU","elsa_h@t-online.de"
++"1615","Emil B Willcock","STAFFORD PARK 4SHROPSHIRE","86406-6159",59.0,"M","single","worker",7.0,"YES","NO","NO","YES",7249.00,101615,"SAVINGS","emil_w@gmx.net","3528-3095-1856-2063","603-555-4379",NULL,"ST","502-555-2481"
++"1616","Roseanne H Tuttas","178/188 Great South West Road 178/188 Great South West Road Hounslow","27698-0001",65.0,"F","married","inactive",10.0,"NO","NO","NO","YES",6321.00,101616,"SAVINGS","roseanne.tuttas@gmx.net","3400 000000 00009","614-555-9752","401-555-2837","SN","tuttas@yahoo.com"
++"1617","Clifton Z Cary","HorsforthLeeds","98404-4508",91.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",4047.57,101617,"SAVINGS","clifton.cary@gmx.net","30210111161229","307-555-6129",NULL,"GA","505-555-9215"
++"1617","Clifton Z Cary","HorsforthLeeds","98404-4508",91.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",10943.43,201617,"CHECKING","clifton.cary@gmx.net","30210111161229","307-555-6129",NULL,"GA","505-555-9215"
++"1618","Benny F Kane","LympheHythe","80901",63.0,"M","married","pensioner",18.0,"NO","NO","YES","NO",81764.00,101618,"SAVINGS","benny_kane@blue.com","3530 1113 3330 0000","804-555-6063","385-555-1568","UG","775-555-1742"
++"1619","Chatherine N Sprung","AbergavennyGwent","32137-2415",33.0,"M","cohabitant","intermediate professions",8.0,"NO","NO","YES","YES",2456.00,101619,"CHECKING","chatherine_sprung@ccdef.net","5169799091854334","503-555-2264",NULL,"GP","sprung@cicn.gov"
++"1620","Seth W Bruns","DARTMOUTH ROADWEST MIDLANDS","85281-4961",60.0,"F","married","inactive",17.0,"NO","NO","YES","NO",6287.76,101620,"CHECKING","seth_bruns@blue.com","30310111161029","417-555-5100","808-555-6656","GE","seth.bruns@blue.com"
++"1620","Seth W Bruns","DARTMOUTH ROADWEST MIDLANDS","85281-4961",60.0,"F","married","inactive",17.0,"NO","NO","YES","NO",17000.23,201620,"SAVINGS","seth_bruns@blue.com","30310111161029","417-555-5100","808-555-6656","GE","seth.bruns@blue.com"
++"1621","Alberto Y Forrest","LympheHythe","30214-0604",71.0,"F","married","inactive",35.0,"YES","NO","NO","NO",-44.00,101621,"CHECKING","forrest@icloud.com","5285696282092972","302-555-3810","615-555-3144","GH","517-555-5462"
++"1622","Meir W Frizzell","Sanderson StreetSheffield","78028-2766",20.0,"M","single","inactive",1.0,"NO","NO","NO","YES",1735.00,101622,"CHECKING","frizzell@web.de",NULL,"334-555-2322",NULL,"MT","515-555-5669"
++"1623","Ron M Effinger","GoldthorpeRotherham","32206",63.0,"F","widowed","pensioner",20.0,"YES","NO","YES","YES",1436.94,101623,"CHECKING","ron_e@aol.com","5580977968891503","843-555-1834",NULL,"LA","601-555-1708"
++"1623","Ron M Effinger","GoldthorpeRotherham","32206",63.0,"F","widowed","pensioner",20.0,"YES","NO","YES","YES",3885.06,201623,"SAVINGS","ron_e@aol.com","5580977968891503","843-555-1834",NULL,"LA","601-555-1708"
++"1624","Dorsey T Barnett","----------Northampton","85260",37.0,"F","married","craftsmen, storekeepers",17.0,"NO","NO","NO","YES",6858.00,101624,"CHECKING","barnett@ibm.com","3530 1113 3330 0000","573-555-9135","405-555-3667","CV","barnett@gmail.com"
++"1625","Napoleon B Ackel","Berkswell RoadMeriden","802",72.0,"M","married","pensioner",16.0,"NO","NO","YES","YES",59778.00,101625,"CHECKING","napoleon.ackel@msn.com","5285696282092972","843-555-3225","208-555-9759","PT","804-555-558"
++"1626","Frances I Kerr","91/95 PEREGRINE ROADHAINAULT ILFORD ESSE","80901",23.0,"F","single","executives,self-employed",0.0,"NO","YES","NO","YES",628.02,101626,"CHECKING","fkerr@ccdef.net","213130951856200","916-555-9280","334-555-4432","UA","401-555-9719"
++"1626","Frances I Kerr","91/95 PEREGRINE ROADHAINAULT ILFORD ESSE","80901",23.0,"F","single","executives,self-employed",0.0,"NO","YES","NO","YES",1697.98,201626,"SAVINGS","fkerr@ccdef.net","213130951856200","916-555-9280","334-555-4432","UA","401-555-9719"
++"1627","Celes Y Torres","Headlands RoadLiversedge","53278-0001",26.0,"F","single","employee",2.0,"NO","NO","NO","YES",4186.00,101627,"SAVINGS","celes_torres@t-online.de","30111111161229","916-555-3705",NULL,"GB","617-555-1360"
++"1628","Maryann L Ehrenreich","Springhead Enterprise ParkNorthfleet","47111",57.0,"F","married","inactive",21.0,"NO","NO","YES","NO",4679.00,101628,"SAVINGS","maryann_e@ibm.com","4146 6643 9004 5458","602-555-3489","651-555-2609","NZ","614-555-8858"
++"1629","Brandon B Ho","--------------------","0",19.0,"M","single","inactive",3.0,"NO","YES","NO","YES",96.66,101629,"SAVINGS","ho@aol.com","4024 0071 2159 5481","609-555-2401",NULL,"DE",NULL
++"1629","Brandon B Ho","--------------------","0",19.0,"M","single","inactive",3.0,"NO","YES","NO","YES",261.34,201629,"CHECKING","ho@aol.com","4024 0071 2159 5481","609-555-2401",NULL,"DE",NULL
++"1630","Emily H Litthuanian","5 Warf LaneLeicester","36066",33.0,"M","married","worker",1.0,"NO","YES","NO","YES",1846.00,101630,"SAVINGS","emily_l@aol.com","3400 000000 00009","614-555-9051","334-555-7455","CV","208-555-3618"
++"1631","Joan C Hamilton","STAFFORD PARK 4SHROPSHIRE","78954-0089",26.0,"M","single","inactive",2.0,"NO","YES","NO","YES",4411.00,101631,"SAVINGS","hamilton@ibm.com","5285696282092972","307-555-2439","402-555-3912","PM","417-555-6540"
++"1632","Dana I Zerr","----------Bracknell","30024",61.0,"F","married","pensioner",11.0,"NO","NO","YES","NO",924.21,101632,"CHECKING","zerr@gmx.net","30310111161029","608-555-708","208-555-4948","TH","717-555-3869"
++"1632","Dana I Zerr","----------Bracknell","30024",61.0,"F","married","pensioner",11.0,"NO","NO","YES","NO",2498.79,201632,"SAVINGS","zerr@gmx.net","30310111161029","608-555-708","208-555-4948","TH","717-555-3869"
++"1633","Moise U Alparaz","Bishops StortfordHertfordshire","28677-5643",76.0,"F","married","inactive",0.0,"YES","NO","NO","NO",9990.00,101633,"CHECKING","moise_a@ccdef.net","5169-7990-9185-4334","843-555-5574","614-555-9230","PA","919-555-7729"
++"1634","Royce P Pixler","Halesfield 2Telford","80901",65.0,"F","married","inactive",28.0,"YES","NO","YES","NO",32778.00,101634,"CHECKING","royce_p@icloud.com","5423111111111111","404-555-2800",NULL,"PW","406-555-5274"
++"1635","Adriane J Lyons","WOODILEE INDUSTRIAL ESTATEGLASGOW","37209-1050",72.0,"F","married","inactive",17.0,"NO","NO","NO","NO",2294.73,101635,"CHECKING",NULL,"5169-7990-9185-4334","850-555-7966",NULL,"CI","360-555-3589"
++"1635","Adriane J Lyons","WOODILEE INDUSTRIAL ESTATEGLASGOW","37209-1050",72.0,"F","married","inactive",17.0,"NO","NO","NO","NO",6204.26,201635,"SAVINGS",NULL,"5169-7990-9185-4334","850-555-7966",NULL,"CI","360-555-3589"
++"1636","Bradley N Lopez","----------SWANSEA","33850-1447",36.0,"F","married","inactive",8.0,"NO","NO","YES","NO",10083.00,101636,"CHECKING","lopez@blue.com","3400-000000-00009","317-555-1239",NULL,"RE","785-555-2986"
++"1637","Gini Q Margeson","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","33027-2936",47.0,"F","married","inactive",16.0,"NO","NO","YES","NO",39770.00,101637,"CHECKING","gini_m@ibm.com","36111111111111","615-555-8877","860-555-8074","SZ","804-555-7416"
++"1638","Ye W Di Santo","Lower High StreetWednesbury","80901",88.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",14861.07,101638,"CHECKING","ysanto@aol.com","30011111111119","401-555-1658","303-555-3467","ET","512-555-8345"
++"1638","Ye W Di Santo","Lower High StreetWednesbury","80901",88.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",40179.93,201638,"SAVINGS","ysanto@aol.com","30011111111119","401-555-1658","303-555-3467","ET","512-555-8345"
++"1639","Bennie T Kroeplin","CRENDON STREETBUCKS","85750-9670",61.0,"M","married","worker",16.0,"NO","NO","NO","YES",28058.00,101639,"CHECKING","benniekroeplin@t-online.de",NULL,"850-555-4596",NULL,"HN","860-555-8655"
++"1640","Willie O Bower","North LaneAldershot","07606-1412",45.0,"M","married","craftsmen, storekeepers",0.0,"YES","YES","YES","YES",0.00,101640,"CHECKING","willie_bower@t-online.de","38111111111119","804-555-1555",NULL,"ES","904-555-5859"
++"1641","Donna I Crowder","69 MOLAND STRRETBIRMINGHAM","99523-2764",72.0,"F","widowed","inactive",18.0,"YES","NO","NO","NO",6201.90,101641,"SAVINGS","donna_c@msn.com","5580977968891503","701-555-8108","651-555-450","SB",NULL
++"1641","Donna I Crowder","69 MOLAND STRRETBIRMINGHAM","99523-2764",72.0,"F","widowed","inactive",18.0,"YES","NO","NO","NO",16768.10,201641,"CHECKING","donna_c@msn.com","5580977968891503","701-555-8108","651-555-450","SB",NULL
++"1642","Aziza F Mills","LOWER MILE HOUSE LANENEWCASTLE UNDER LYME","80901",37.0,"F","single","intermediate professions",18.0,"NO","NO","NO","NO",34385.00,101642,"SAVINGS","mills@ibm.com","4024007121595481","916-555-7933","860-555-2597","AS","601-555-2471"
++"1643","Whitney K Aleksintser","White Horse Business ParkTrowbridge","45331",61.0,"M","married","executives,self-employed",30.0,"NO","NO","NO","NO",7146.00,101643,"SAVINGS","whitney_a@yahoo.com","38111111111119","651-555-7514",NULL,"GL","405-555-8292"
++"1644","Ethem L Nylund","----------WARWICK","78028-4504",37.0,"F","married","worker",20.0,"NO","NO","YES","NO",59.13,101644,"SAVINGS","nylund@aol.com","340000000000009","202-555-7845","602-555-9917","RU","502-555-3421"
++"1644","Ethem L Nylund","----------WARWICK","78028-4504",37.0,"F","married","worker",20.0,"NO","NO","YES","NO",159.87,201644,"CHECKING","nylund@aol.com","340000000000009","202-555-7845","602-555-9917","RU","502-555-3421"
++"1645","Cammile G Affinito","55 London RoadSt Albans","32805-6182",20.0,"F","single","inactive",6.0,"NO","YES","NO","YES",3172.00,101645,"SAVINGS","cammile_affinito@t-online.de","5285696282092972","208-555-7514",NULL,"HU","affinito@gmx.net"
++"1646","Tracy K Dufault","KETTLES WOOD DRIVEBIRMINGHAM","85267-4944",76.0,"F","divorced","pensioner",11.0,"NO","NO","NO","YES",16825.00,101646,"CHECKING","dufault@t-online.de","4024 0071 2159 5481","505-555-2305",NULL,"BZ",NULL
++"1647","Bailey L Santillana","WittonBirmingham","91406-1923",26.0,"F","cohabitant","intermediate professions",1.0,"NO","NO","YES","NO",120.15,101647,"CHECKING",NULL,"213130951856200","317-555-1076","401-555-219","VN","417-555-8620"
++"1647","Bailey L Santillana","WittonBirmingham","91406-1923",26.0,"F","cohabitant","intermediate professions",1.0,"NO","NO","YES","NO",324.84,201647,"SAVINGS",NULL,"213130951856200","317-555-1076","401-555-219","VN","417-555-8620"
++"1648","Guss R Haberle","SmallfieldHorley","27587-9693",76.0,"F","widowed","inactive",5.0,"NO","NO","NO","NO",7398.00,101648,"CHECKING","ghaberle@ccdef.net","30411111111111","360-555-4128","804-555-2328","CU","919-555-1479"
++"1649","Theodor N Aguire","13Earlstrees Road CorbyNorthants NN 17 4NP","76011-7603",86.0,"M","married","pensioner",19.0,"YES","NO","YES","YES",11893.00,101649,"CHECKING","theodor_aguire@ccdef.net","36111111111111","302-555-9381","405-555-4839","YT","406-555-9128"
++"1650","Andy U Minarsky","AbergavennyGwent","33311-5603",76.0,"M","married","pensioner",20.0,"YES","NO","YES","NO",4794.66,101650,"CHECKING","minarsky@cicn.gov","5169799091854334","804-555-5514","804-555-8450","FJ","aminarsky@msl.org"
++"1650","Andy U Minarsky","AbergavennyGwent","33311-5603",76.0,"M","married","pensioner",20.0,"YES","NO","YES","NO",12963.34,201650,"SAVINGS","minarsky@cicn.gov","5169799091854334","804-555-5514","804-555-8450","FJ","aminarsky@msl.org"
++"1651","Warren W Granger","Birmingham RoadRedditch","33782-4704",79.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",102680.00,101651,"CHECKING","warren.granger@gmail.com","5580977968891503","843-555-8596","303-555-3586","GR","609-555-1382"
++"1652","Francezka K Mesa","Garstang Garstang Preston","28289-0241",20.0,"M","single","inactive",3.0,"NO","NO","NO","YES",2252.00,101652,"CHECKING","mesa@web.de","3528-3095-1856-2063","651-555-3697",NULL,"GI",NULL
++"1653","Felimon M Dale","Washington RoadSunderland","72204-2208",23.0,"M","single","employee",4.0,"NO","YES","NO","YES",9943.83,101653,"CHECKING","fdale@ccdef.net","4146-6643-9004-5458","919-555-6108","904-555-2464","PW","602-555-6054"
++"1653","Felimon M Dale","Washington RoadSunderland","72204-2208",23.0,"M","single","employee",4.0,"NO","YES","NO","YES",26885.17,201653,"SAVINGS","fdale@ccdef.net","4146-6643-9004-5458","919-555-6108","904-555-2464","PW","602-555-6054"
++"1654","Claude S Fitzgerald","----------Telford","77378-8793",53.0,"M","married","worker",15.0,"NO","NO","YES","NO",5579.00,101654,"CHECKING","claude_fitzgerald@aol.com","180030951856201","334-555-2940",NULL,"CU","401-555-60"
++"1655","Rhonda Y Heinen","GarstangPreston","22031",41.0,"F","widowed","employee",18.0,"YES","NO","NO","YES",2818.00,101655,"CHECKING",NULL,"30111111161229","601-555-4326",NULL,"DM","208-555-5730"
++"1656","Carryl Q Pash","King StreetBedworth","33311",72.0,"M","married","inactive",13.0,"NO","NO","YES","YES",1172.88,101656,"CHECKING","carryl.pash@aol.com","6220264390045758","503-555-8214","775-555-6116","NZ","225-555-5731"
++"1656","Carryl Q Pash","King StreetBedworth","33311",72.0,"M","married","inactive",13.0,"NO","NO","YES","YES",3171.12,201656,"SAVINGS","carryl.pash@aol.com","6220264390045758","503-555-8214","775-555-6116","NZ","225-555-5731"
++"1657","Maury C Graff","Green LaneHeywood","98006-2121",59.0,"M","single","farmer",11.0,"NO","NO","NO","YES",46204.00,101657,"CHECKING","graff@aol.com","5169799091854334","505-555-1172",NULL,"CN","334-555-423"
++"1658","Norio P Munsil","WittonBirmingham","98324-0296",42.0,"M","single","worker",20.0,"NO","NO","NO","NO",17262.00,101658,"CHECKING","norio_munsil@gmx.net","3400-000000-00009","775-555-5550",NULL,"MW","916-555-2023"
++"1659","Sam U Radunsky","Hall GreenBirmingham","77036-7418",23.0,"M","single","inactive",7.0,"NO","YES","NO","YES",-46.71,101659,"CHECKING","sam_radunsky@ccdef.net","4146664390045458","608-555-7561","405-555-6650","JO","502-555-219"
++"1659","Sam U Radunsky","Hall GreenBirmingham","77036-7418",23.0,"M","single","inactive",7.0,"NO","YES","NO","YES",-126.28,201659,"SAVINGS","sam_radunsky@ccdef.net","4146664390045458","608-555-7561","405-555-6650","JO","502-555-219"
++"1660","Reid I Cavins","FACTORY ROADUPTON - POOLE - DORS","86301-3214",40.0,"M","married","worker",21.0,"NO","YES","YES","YES",2590.00,101660,"CHECKING","cavins@blue.com","378282246310005","850-555-2345","208-555-444","AM","651-555-7387"
++"1661","Pam A Cehula","RIVERSIDE WAYCAMBERLEY SURREY GU","34104",26.0,"M","single","inactive",7.0,"NO","NO","NO","YES",27354.00,101661,"SAVINGS","pam_c@aol.com","38111111111119","919-555-8939",NULL,"OM","207-555-8459"
++"1662","Skeets A Chachanashvili","Merse RoadRedditch","22031",46.0,"F","married","intermediate professions",23.0,"YES","NO","YES","NO",205.74,101662,"SAVINGS","skeets_chachanashvili@gmx.net",NULL,"334-555-6146",NULL,"DZ","512-555-1848"
++"1662","Skeets A Chachanashvili","Merse RoadRedditch","22031",46.0,"F","married","intermediate professions",23.0,"YES","NO","YES","NO",556.26,201662,"CHECKING","skeets_chachanashvili@gmx.net",NULL,"334-555-6146",NULL,"DZ","512-555-1848"
++"1663","Marc L Huez","STAFFORD PARK 4SHROPSHIRE","27604",64.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",20585.00,101663,"SAVINGS","marc_huez@ibm.com","6520224090045455","701-555-9617","775-555-3421","CX","huez@de.ibm.com"
++"1664","Kenneth K Hewitt","RAMSBOTTOMLANCS.","99523-2764",21.0,"F","single","inactive",5.0,"NO","NO","NO","YES",1229.00,101664,"SAVINGS","kenneth.hewitt@web.de","5520111111111121","617-555-4993","614-555-295","BD","401-555-5112"
++"1665","Dick I Eisner","Hail WestonCAMBS","98580",76.0,"F","widowed","pensioner",20.0,"YES","NO","NO","YES",6949.26,101665,"SAVINGS","eisner@web.de","6220264390045758","603-555-24","808-555-6445","SI","601-555-4159"
++"1665","Dick I Eisner","Hail WestonCAMBS","98580",76.0,"F","widowed","pensioner",20.0,"YES","NO","NO","YES",18788.73,201665,"CHECKING","eisner@web.de","6220264390045758","603-555-24","808-555-6445","SI","601-555-4159"
++"1666","Jacqueline W Madsen","IDOTTSVAGEN 7SWEDEN","71134",25.0,"M","single","inactive",8.0,"NO","YES","NO","YES",3566.00,101666,"CHECKING","jacqueline_m@cicn.gov","3400 000000 00009","843-555-9296",NULL,"MW","860-555-8376"
++"1667","Jerald Z Allen","Hail WestonCAMBS","75238-1395",82.0,"F","widowed","inactive",12.0,"NO","NO","YES","NO",13455.00,101667,"CHECKING","jerald_allen@msn.com","6220264390045758","803-555-3807",NULL,"MZ","allen@aol.com"
++"1668","Myrna A Pham","Canal RoadLeeds","19440-1557",69.0,"M","single","pensioner",26.0,"NO","NO","NO","NO",6438.15,101668,"CHECKING","mpham@ibm.com","5580977968891503","360-555-7012",NULL,"CD","202-555-9480"
++"1668","Myrna A Pham","Canal RoadLeeds","19440-1557",69.0,"M","single","pensioner",26.0,"NO","NO","NO","NO",17406.85,201668,"SAVINGS","mpham@ibm.com","5580977968891503","360-555-7012",NULL,"CD","202-555-9480"
++"1669","Hurston D Eisele","Tame RoadBirmingham","75091-2146",33.0,"M","married","worker",12.0,"NO","YES","YES","NO",2565.00,101669,"CHECKING","hurston.eisele@t-online.de","5423111111111111","402-555-9571","518-555-8763","NA","hurston.eisele@icloud.com"
++"1670","Darryl J Morey","5 Oxford RoadNewbury","86401-9440",77.0,"F","married","inactive",19.0,"NO","NO","YES","NO",165821.00,101670,"SAVINGS","darryl_m@web.de","4024 0071 2159 5481","904-555-7968",NULL,"EC","302-555-9551"
++"1671","Misty X Hasten","WATCHMEADHERTFORDSHIRE","29512",76.0,"M","separated","pensioner",37.0,"NO","NO","NO","NO",1929.69,101671,"SAVINGS","misty_h@yahoo.com",NULL,"860-555-8075",NULL,"CA","609-555-4242"
++"1671","Misty X Hasten","WATCHMEADHERTFORDSHIRE","29512",76.0,"M","separated","pensioner",37.0,"NO","NO","NO","NO",5217.30,201671,"CHECKING","misty_h@yahoo.com",NULL,"860-555-8075",NULL,"CA","609-555-4242"
++"1754","Shelley I Baxter","LlantrisantPontyclun","75381-9060",47.0,"F","single","inactive",3.0,"NO","YES","NO","YES",-5.00,101754,"CHECKING","shelleybaxter@gmx.net","5462522444922689","785-555-1261",NULL,"RO","775-555-5290"
++"1672","Gilmore W Beil","Small HeathBirmingham","98324",48.0,"F","divorced","worker",8.0,"NO","NO","YES","NO",2677.00,101672,"SAVINGS","gilmore_b@icloud.com","4024007121595481","614-555-7414","601-555-7793","YE","303-555-553"
++"1673","Philippe X Kunze","17 Victoria RoadStaffs","85242-4092",54.0,"M","married","inactive",27.0,"NO","NO","YES","NO",43933.00,101673,"SAVINGS","philippe_kunze@msl.org","3528-3095-1856-2063","850-555-1909","307-555-7847","HK","303-555-8095"
++"1674","Yvette Q Bouchard","1 CHURCH ROWKENT","67846-5669",68.0,"F","married","inactive",2.0,"NO","NO","YES","YES",28300.59,101674,"SAVINGS","ybouchard@msl.org","4146 6643 9004 5458","512-555-5411",NULL,"FX","bouchard@web.de"
++"1674","Yvette Q Bouchard","1 CHURCH ROWKENT","67846-5669",68.0,"F","married","inactive",2.0,"NO","NO","YES","YES",76516.41,201674,"CHECKING","ybouchard@msl.org","4146 6643 9004 5458","512-555-5411",NULL,"FX","bouchard@web.de"
++"1675","Cleaburne M Amos","Guildhall LaneLeicester","77356",40.0,"M","separated","worker",7.0,"NO","NO","NO","YES",3438.00,101675,"SAVINGS",NULL,"3400 000000 00009","617-555-4587","907-555-9662","SI","605-555-7071"
++"1676","Zipporah N Lucey","FOUNTAYNE ROADLONDON","86324-2661",26.0,"M","single","inactive",7.0,"YES","NO","NO","YES",20015.00,101676,"SAVINGS","zipporah_l@web.de","4146664390045458","515-555-9258",NULL,"TD","603-555-2008"
++"1677","Herbert D Westling","Brickyard RoadWalsall","80911",27.0,"M","single","worker",3.0,"NO","YES","NO","YES",62.37,101677,"SAVINGS","westling@cicn.gov","5383908528354962","808-555-7358","651-555-7704","SA",NULL
++"1677","Herbert D Westling","Brickyard RoadWalsall","80911",27.0,"M","single","worker",3.0,"NO","YES","NO","YES",168.63,201677,"CHECKING","westling@cicn.gov","5383908528354962","808-555-7358","651-555-7704","SA",NULL
++"1678","Lauren K Kelley","Boyatt WoodEastleigh","98055-2307",72.0,"F","widowed","pensioner",22.0,"NO","NO","NO","NO",41096.00,101678,"CHECKING","lauren_kelley@t-online.de","3528-3095-1856-2063","208-555-899",NULL,"LS","lauren_kelley@de.ibm.com"
++"1679","Kameron N Aman","5 Warf LaneLeicester","86340",68.0,"F","married","pensioner",21.0,"NO","NO","YES","YES",15124.00,101679,"CHECKING","kaman@gmail.com","4024 0071 2159 5481","717-555-1632",NULL,"LY","417-555-2521"
++"1680","Wes X Wohl","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27889-7204",70.0,"F","widowed","inactive",10.0,"NO","NO","NO","YES",394.20,101680,"CHECKING","wes.wohl@msn.com","5520111111111121","802-555-1338","505-555-3217","NE","334-555-764"
++"1680","Wes X Wohl","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27889-7204",70.0,"F","widowed","inactive",10.0,"NO","NO","NO","YES",1065.80,201680,"SAVINGS","wes.wohl@msn.com","5520111111111121","802-555-1338","505-555-3217","NE","334-555-764"
++"1681","Carlo T Farabee","Nottingham RoadBelper","85706-4911",72.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",80199.00,101681,"CHECKING","carlo_f@yahoo.com","4146-6643-9004-5458","360-555-2151",NULL,"NF","317-555-4136"
++"1682","Phoumy E Hughes","Sharston RoadManchester","86303-5569",76.0,"F","married","inactive",23.0,"YES","NO","NO","NO",62200.00,101682,"CHECKING","hughes@web.de","3400 000000 00009","785-555-7106",NULL,"BZ","512-555-6295"
++"1683","Rosalie Z Dave","Benton LaneNEWCASTLE Upon Tyne","78028-4811",62.0,"M","married","pensioner",22.0,"NO","YES","YES","YES",497.07,101683,"CHECKING","rosaliedave@yahoo.com","30210111161229","601-555-3070",NULL,"SK","207-555-3619"
++"1683","Rosalie Z Dave","Benton LaneNEWCASTLE Upon Tyne","78028-4811",62.0,"M","married","pensioner",22.0,"NO","YES","YES","YES",1343.93,201683,"SAVINGS","rosaliedave@yahoo.com","30210111161229","601-555-3070",NULL,"SK","207-555-3619"
++"1684","Min M Broda","35 Livery StreetBirmingham","75397-1342",75.0,"F","widowed","inactive",28.0,"YES","NO","NO","NO",24108.00,101684,"SAVINGS","broda@gmx.net","30210111161229","505-555-2090","804-555-8083","LR","916-555-6559"
++"1685","Garnet W Neyens","148 Edmund StreetBirmingham","33441-2199",24.0,"M","single","inactive",14.0,"YES","NO","NO","YES",6396.00,101685,"SAVINGS","garnet_n@msl.org","213130951856200","603-555-2129","401-555-3399","IE","503-555-8584"
++"1686","Alfredo H Howell","10 Mordaunt RoadLondon","98382-7456",36.0,"M","married","executives,self-employed",5.0,"NO","YES","YES","YES",1724.49,101686,"SAVINGS","howell@yahoo.com","213130951856200","843-555-1537","860-555-4998","IS","512-555-5097"
++"1686","Alfredo H Howell","10 Mordaunt RoadLondon","98382-7456",36.0,"M","married","executives,self-employed",5.0,"NO","YES","YES","YES",4662.51,201686,"CHECKING","howell@yahoo.com","213130951856200","843-555-1537","860-555-4998","IS","512-555-5097"
++"1687","Winford B Flin","SmallfieldHorley","85635",64.0,"M","single","pensioner",28.0,"YES","NO","NO","NO",13838.00,101687,"SAVINGS","winford_flin@msl.org","5462522444922689","785-555-5819",NULL,"CR","flin@de.ibm.com"
++"1688","Brantley V Hemmingson","HatfieldHertfordshire","80917-5710",43.0,"F","single","employee",7.0,"NO","YES","NO","YES",-90.00,101688,"SAVINGS","hemmingson@ibm.com","38111111111119","843-555-7210",NULL,"MA","brantley_hemmingson@blue.com"
++"1689","Lou J Kindred","Church RoadBristol","33803-8359",20.0,"M","single","inactive",3.0,"NO","NO","NO","NO",238.14,101689,"SAVINGS","lou_kindred@t-online.de","5580977968891503","804-555-5723",NULL,"KP","502-555-8345"
++"1689","Lou J Kindred","Church RoadBristol","33803-8359",20.0,"M","single","inactive",3.0,"NO","NO","NO","NO",643.86,201689,"CHECKING","lou_kindred@t-online.de","5580977968891503","804-555-5723",NULL,"KP","502-555-8345"
++"1690","Klaus S Teraberry","KING STREETWARWICKSHIRE","33912",56.0,"M","single","farmer",15.0,"NO","NO","YES","NO",74017.00,101690,"SAVINGS","teraberry@yahoo.com","38111111111119","603-555-8467","601-555-1326","DJ","klaus_teraberry@ibm.com"
++"1691","Tilly U Mcgill","224 Marsh HillBirmingham","32703-8504",17.0,"M","child","inactive",1.0,"NO","NO","NO","YES",-643.00,101691,"SAVINGS","tmcgill@ccdef.net","30210111161229","605-555-7316",NULL,"SL","605-555-7275"
++"1692","Micahel X Jesten","STAFFORD PARK 15TELFORD","98365-9634",42.0,"F","married","intermediate professions",24.0,"NO","NO","YES","YES",3461.13,101692,"SAVINGS","jesten@ccdef.net",NULL,"603-555-4028","651-555-3281","BV","602-555-9419"
++"1692","Micahel X Jesten","STAFFORD PARK 15TELFORD","98365-9634",42.0,"F","married","intermediate professions",24.0,"NO","NO","YES","YES",9357.86,201692,"CHECKING","jesten@ccdef.net",NULL,"603-555-4028","651-555-3281","BV","602-555-9419"
++"1693","Rupert U Bagley","Industrial Est.Witney","70809",34.0,"F","married","inactive",4.0,"NO","YES","NO","YES",2272.00,101693,"CHECKING","rbagley@ibm.com","36111111111111","802-555-4754",NULL,"KY","208-555-7159"
++"1694","Klaus T Mcnabb","WittonBirmingham","77282-0285",62.0,"M","married","pensioner",13.0,"NO","NO","YES","NO",19797.00,101694,"CHECKING","klaus_mcnabb@ibm.com","30011111111119","208-555-1005","717-555-7379","MC","808-555-2337"
++"1695","Julius I Schmelzer","WittonBirmingham","30030",68.0,"F","married","inactive",18.0,"YES","NO","YES","NO",3044.25,101695,"CHECKING","jschmelzer@yahoo.com","30011111111119","404-555-5053","302-555-3147","SE","701-555-799"
++"1695","Julius I Schmelzer","WittonBirmingham","30030",68.0,"F","married","inactive",18.0,"YES","NO","YES","NO",8230.75,201695,"SAVINGS","jschmelzer@yahoo.com","30011111111119","404-555-5053","302-555-3147","SE","701-555-799"
++"1696","Suzan E Flakes","STAND PARK / SHEFFIELD ROAD STAND PARK / SHEFFIELD ROAD CHESTERFIELD DERBY","85251-1255",85.0,"M","married","pensioner",29.0,"NO","NO","YES","NO",42275.00,101696,"CHECKING","suzan_f@aol.com","5169-7990-9185-4334","503-555-4390",NULL,"MA","360-555-7513"
++"1697","Terri B Duvall","Birmingham RoadRedditch","32819-8499",68.0,"F","widowed","inactive",20.0,"YES","NO","NO","NO",2135.00,101697,"CHECKING","tduvall@ibm.com","3530 1113 3330 0000","916-555-8279",NULL,"ZM","601-555-3680"
++"1698","Debbie P Mcgee","STOCKPORTENGLAND","98382-8005",66.0,"F","married","farmer",29.0,"YES","NO","YES","NO",2756.97,101698,"CHECKING","debbie_mcgee@de.ibm.com","5423111111111111","615-555-2012","505-555-6987","GI","517-555-1428"
++"1698","Debbie P Mcgee","STOCKPORTENGLAND","98382-8005",66.0,"F","married","farmer",29.0,"YES","NO","YES","NO",7454.03,201698,"SAVINGS","debbie_mcgee@de.ibm.com","5423111111111111","615-555-2012","505-555-6987","GI","517-555-1428"
++"1699","Homer R Magliozzi","WEST WORLD WESTGATELONDON","1441",24.0,"M","single","employee",8.0,"NO","YES","NO","YES",8625.00,101699,"SAVINGS","homer_m@gmx.net","4146664390045458",NULL,NULL,"VU","602-555-5456"
++"1700","Stuart H Schenbeck","CLARKE STREETDERBY","29577",55.0,"M","married","farmer",26.0,"NO","NO","YES","NO",38095.00,101700,"SAVINGS","schenbeck@msl.org","5169 7990 9185 4334","401-555-2836","904-555-3078","AL","stuart_s@msl.org"
++"1701","Shan L Leavitt","Hail WestonCAMBS","87571-1104",40.0,"F","married","inactive",12.0,"NO","YES","YES","YES",-43.20,101701,"SAVINGS","leavitt@ibm.com","5285696282092972","360-555-8096",NULL,"PY",NULL
++"1701","Shan L Leavitt","Hail WestonCAMBS","87571-1104",40.0,"F","married","inactive",12.0,"NO","YES","YES","YES",-116.80,201701,"CHECKING","leavitt@ibm.com","5285696282092972","360-555-8096",NULL,"PY",NULL
++"1702","Jules T Harper","Sherbourne DriveTilbrook Milton Keyn","37912-1397",44.0,"M","married","employee",20.0,"NO","NO","YES","NO",7678.00,101702,"SAVINGS","harper@ccdef.net","5169-7990-9185-4334","850-555-6779",NULL,"BA","503-555-9815"
++"1703","Edmund H Snow","9 Coneygre Industrial EstateTipton","29414-6484",50.0,"F","married","employee",14.0,"NO","YES","YES","NO",13007.00,101703,"SAVINGS","snow@icloud.com","3528-3095-1856-2063","406-555-7530","208-555-1203","AE","208-555-9087"
++"1704","Romulo V Wegener","WythenshaweManchester","29116-1143",67.0,"F","widowed","pensioner",16.0,"NO","NO","NO","YES",4071.60,101704,"SAVINGS","romulo.wegener@icloud.com","5383908528354962","617-555-4132","515-555-6446","SK","317-555-4815"
++"1704","Romulo V Wegener","WythenshaweManchester","29116-1143",67.0,"F","widowed","pensioner",16.0,"NO","NO","NO","YES",11008.40,201704,"CHECKING","romulo.wegener@icloud.com","5383908528354962","617-555-4132","515-555-6446","SK","317-555-4815"
++"1705","Janette H Nelsen","DentonManchester","32304",63.0,"F","married","pensioner",21.0,"NO","NO","YES","NO",19139.00,101705,"SAVINGS","janette_n@de.ibm.com","38111111111119","605-555-5773","502-555-1821","SA","401-555-6729"
++"1706","Myra S Moshir","Wigston Wigston Leicester","33773-5315",80.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",58045.00,101706,"SAVINGS","myra_moshir@t-online.de","3528-3095-1856-2063","517-555-7981",NULL,"RE","802-555-4327"
++"1707","Jon H Bickmore","SpondonDerby","77055-1208",77.0,"F","widowed","pensioner",17.0,"YES","NO","NO","YES",6166.80,101707,"SAVINGS","bickmore@web.de","4024007121595481","517-555-2795",NULL,"CN","614-555-5144"
++"1707","Jon H Bickmore","SpondonDerby","77055-1208",77.0,"F","widowed","pensioner",17.0,"YES","NO","NO","YES",16673.20,201707,"CHECKING","bickmore@web.de","4024007121595481","517-555-2795",NULL,"CN","614-555-5144"
++"1708","Rolly Z Stanske","69 MOLAND STRRETBIRMINGHAM","98362",64.0,"M","divorced","inactive",14.0,"NO","NO","NO","YES",4025.00,101708,"SAVINGS","rolly_s@ibm.com","4146664390045458","334-555-3650",NULL,"RE","404-555-4909"
++"1709","Mohammad Y Alcain","HAY HILL ROADBIRMINGHAM","72947-8504",26.0,"F","single","inactive",3.0,"NO","YES","NO","YES",-1204.00,101709,"SAVINGS","malcain@msl.org","5169-7990-9185-4334","503-555-6216","405-555-2128","NR",NULL
++"1710","Alva A Binkley","KETTLES WOOD DRIVEBIRMINGHAM","78070",82.0,"M","widowed","pensioner",23.0,"YES","NO","NO","NO",2609.01,101710,"SAVINGS","abinkley@icloud.com","30210111161229","417-555-7400",NULL,"US",NULL
++"1710","Alva A Binkley","KETTLES WOOD DRIVEBIRMINGHAM","78070",82.0,"M","widowed","pensioner",23.0,"YES","NO","NO","NO",7053.99,201710,"CHECKING","abinkley@icloud.com","30210111161229","417-555-7400",NULL,"US",NULL
++"1711","Trevor V Rodie","Leabrook RoadWEST MIDLANDS","32608-4748",50.0,"F","married","employee",26.0,"NO","NO","YES","NO",35930.00,101711,"SAVINGS","trevor_r@cicn.gov",NULL,"775-555-5151",NULL,"MZ","651-555-6501"
++"1712","Reva Q Schartow","MOUNTBATTEN HOUSE BASING VIEW MOUNTBATTEN HOUSE BASING VIEW BASINGSTOKE","85383-3236",67.0,"F","married","inactive",24.0,"NO","NO","YES","NO",271033.00,101712,"SAVINGS",NULL,"4024007121595481","617-555-3508","515-555-9401","DJ","reva_s@icloud.com"
++"1713","John Y Gerkin","White Horse Business ParkTrowbridge","46266",65.0,"F","married","inactive",36.0,"YES","NO","YES","YES",11958.03,101713,"SAVINGS","john_g@aol.com","3528-3095-1856-2063","603-555-2429",NULL,"PY","john_gerkin@aol.com"
++"1713","John Y Gerkin","White Horse Business ParkTrowbridge","46266",65.0,"F","married","inactive",36.0,"YES","NO","YES","YES",32330.96,201713,"CHECKING","john_g@aol.com","3528-3095-1856-2063","603-555-2429",NULL,"PY","john_gerkin@aol.com"
++"1714","Michele B Friesen","RAMSBOTTOMLANCS.","85374-7038",30.0,"M","married","employee",3.0,"NO","NO","YES","NO",731.00,101714,"CHECKING","michele.friesen@msn.com","36111111111111","517-555-2630","406-555-7504","MC","402-555-2323"
++"1715","Pedro N Wang","WimborneDorset","28787-9219",47.0,"F","married","inactive",23.0,"NO","NO","YES","NO",2561.00,101715,"CHECKING","wang@blue.com","3400-000000-00009","307-555-8436",NULL,"MK","651-555-1077"
++"1716","Hunkyi A Cantrell","WELWYN ELECTRONICS PARK BEDLINNORTHUMBERLAND","29402-0568",22.0,"F","single","inactive",6.0,"NO","NO","NO","YES",1835.46,101716,"CHECKING","cantrell@yahoo.com","378282246310005","385-555-4751",NULL,"GP","605-555-6337"
++"1716","Hunkyi A Cantrell","WELWYN ELECTRONICS PARK BEDLINNORTHUMBERLAND","29402-0568",22.0,"F","single","inactive",6.0,"NO","NO","NO","YES",4962.54,201716,"SAVINGS","cantrell@yahoo.com","378282246310005","385-555-4751",NULL,"GP","605-555-6337"
++"1717","Elvira N Wetzel","StirchleyBirmingham","77084-2815",27.0,"M","single","inactive",15.0,"YES","NO","YES","NO",7644.00,101717,"CHECKING","wetzel@ccdef.net","36111111111111","850-555-4882",NULL,"SV","517-555-5647"
++"1718","Gene H Torley","5 Warf LaneLeicester","77092-7028",71.0,"M","married","inactive",26.0,"NO","NO","NO","NO",13928.00,101718,"CHECKING","torley@msn.com","3400 000000 00009","405-555-5966",NULL,"AW","334-555-9131"
++"1719","Roselee D Mesa","Stretford Stretford Manchester","34104-7017",43.0,"F","married","farmer",23.0,"YES","NO","YES","NO",14454.72,101719,"SAVINGS","roselee_m@yahoo.com","5462522444922689","614-555-3853",NULL,"CX","401-555-4700"
++"1719","Roselee D Mesa","Stretford Stretford Manchester","34104-7017",43.0,"F","married","farmer",23.0,"YES","NO","YES","NO",39081.28,201719,"CHECKING","roselee_m@yahoo.com","5462522444922689","614-555-3853",NULL,"CX","401-555-4700"
++"1720","Meir N Wightman","Great Western WaySwindon","83864-6219",66.0,"M","married","employee",24.0,"YES","NO","YES","NO",75037.00,101720,"SAVINGS",NULL,"30411111111111","302-555-1982",NULL,"AG","808-555-2294"
++"1721","Carrine D Brigham","NorthgateAldridge","33527",32.0,"M","single","inactive",8.0,"NO","YES","NO","YES",265.00,101721,"SAVINGS","carrine_b@icloud.com",NULL,"202-555-2158",NULL,"MX","carrine_brigham@msl.org"
++"1722","Shawn O Arciga","105 Devonshire RoadLondon","31193-2134",43.0,"M","married","worker",10.0,"NO","NO","NO","YES",9997.56,101722,"SAVINGS","shawn_a@aol.com","4024-0071-2159-5481","802-555-6965",NULL,"MC","608-555-1447"
++"1722","Shawn O Arciga","105 Devonshire RoadLondon","31193-2134",43.0,"M","married","worker",10.0,"NO","NO","NO","YES",27030.44,201722,"CHECKING","shawn_a@aol.com","4024-0071-2159-5481","802-555-6965",NULL,"MC","608-555-1447"
++"1723","Rupert X Baumgartner","Talisman RoadBicester Oxon OX6 6J","34994-3953",85.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",21086.00,101723,"SAVINGS","baumgartner@de.ibm.com","5169 7990 9185 4334","907-555-8024","907-555-9916","KE","515-555-103"
++"1724","Merle Q Lamb","Arlington Way Sundorne Retail Arlington Way Sundorne Retail Shrewsbury","63111",68.0,"M","married","pensioner",26.0,"YES","NO","NO","YES",36573.00,101724,"SAVINGS",NULL,"5383908528354962","904-555-4761","916-555-7446","SK",NULL
++"1725","Jesse T Drake","----------Kingswinford","86336-3804",73.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",10480.05,101725,"SAVINGS",NULL,"3400 000000 00009","907-555-4025",NULL,"KW","916-555-1472"
++"1725","Jesse T Drake","----------Kingswinford","86336-3804",73.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",28334.95,201725,"CHECKING",NULL,"3400 000000 00009","907-555-4025",NULL,"KW","916-555-1472"
++"1726","Lucas I Fraser","----------WOOTTON BASSET","98324",68.0,"M","single","pensioner",26.0,"YES","NO","NO","YES",15522.00,101726,"SAVINGS","lfraser@ccdef.net","30411111111111","360-555-4682","385-555-8153","HK","404-555-778"
++"1727","Amber H Fonder","9 Coneygre Industrial EstateTipton","29340-6536",50.0,"M","single","employee",11.0,"NO","NO","NO","NO",132770.00,101727,"SAVINGS","amber_fonder@de.ibm.com","30411111111111","615-555-432","518-555-2808","LS","208-555-2334"
++"1728","Easter W Cessna","HAY HILL ROADBIRMINGHAM","86001-6296",72.0,"F","married","inactive",26.0,"NO","NO","YES","NO",7718.76,101728,"CHECKING","cessna@gmx.net","340000000000009","603-555-2276","609-555-2190","SO",NULL
++"1728","Easter W Cessna","HAY HILL ROADBIRMINGHAM","86001-6296",72.0,"F","married","inactive",26.0,"NO","NO","YES","NO",20869.23,201728,"SAVINGS","cessna@gmx.net","340000000000009","603-555-2276","609-555-2190","SO",NULL
++"1729","Jana A Phillip","----------CHICHESTER WEST SUSS","77868-3732",34.0,"M","married","employee",9.0,"NO","NO","NO","NO",23403.00,101729,"CHECKING","jphillip@cicn.gov","4024 0071 2159 5481","417-555-6307",NULL,"YE","317-555-6945"
++"1730","Georganne U Asbahr","FACTORY ROADUPTON - POOLE - DORS","30340-1452",75.0,"F","widowed","pensioner",37.0,"NO","NO","NO","NO",9112.00,101730,"CHECKING","asbahr@aol.com","4146-6643-9004-5458","401-555-3063",NULL,"SI","916-555-5956"
++"1731","Namil R Weir","StirchleyBirmingham","32609-3323",77.0,"F","married","inactive",9.0,"YES","NO","YES","NO",8183.43,101731,"CHECKING","weir@gmx.net","3528309518562063","404-555-7490",NULL,"LY","518-555-7548"
++"1731","Namil R Weir","StirchleyBirmingham","32609-3323",77.0,"F","married","inactive",9.0,"YES","NO","YES","NO",22125.57,201731,"SAVINGS","weir@gmx.net","3528309518562063","404-555-7490",NULL,"LY","518-555-7548"
++"1732","Gardner O Acquaro","BRAC CREDIT INTERNATIONALHERTS","33102-5115",72.0,"F","widowed","pensioner",4.0,"NO","NO","NO","YES",4504.00,101732,"CHECKING","gardner.acquaro@aol.com","5169-7990-9185-4334","512-555-4064","202-555-9398","MP",NULL
++"1733","Sergio Q Tandy","Bishops StortfordHertfordshire","85082-7983",20.0,"M","single","worker",2.0,"NO","NO","NO","YES",2134.00,101733,"CHECKING","standy@aol.com","6520224090045455","405-555-7798",NULL,"ML","775-555-5788"
++"1734","Lamont R Soucy","----------Worthing","30029",20.0,"F","single","inactive",1.0,"NO","NO","NO","YES",1386.18,101734,"CHECKING","lamont_soucy@gmx.net","5580977968891503","402-555-6474",NULL,"GA","651-555-6097"
++"1734","Lamont R Soucy","----------Worthing","30029",20.0,"F","single","inactive",1.0,"NO","NO","NO","YES",3747.81,201734,"SAVINGS","lamont_soucy@gmx.net","5580977968891503","402-555-6474",NULL,"GA","651-555-6097"
++"1735","Eva D Fontes","WythenshaweManchester","77092-7028",21.0,"F","single","inactive",4.0,"NO","YES","NO","YES",5605.00,101735,"CHECKING","evafontes@msl.org","180030951856201","501-555-1875",NULL,"PH","417-555-8433"
++"1736","Liam Q Lemuel","58/59 Lower High StreetWest Midlands","33755-1008",44.0,"M","single","worker",23.0,"NO","NO","NO","YES",24924.00,101736,"CHECKING","lemuel@yahoo.com","5169799091854334","804-555-8311",NULL,"BT","608-555-1769"
++"1737","Steven W Liedahl","Barton BlountChurch Broughton","40290",44.0,"F","married","employee",28.0,"NO","NO","YES","NO",1248.21,101737,"CHECKING","liedahl@msn.com",NULL,"385-555-5567","603-555-3180","BG","904-555-8114"
++"1737","Steven W Liedahl","Barton BlountChurch Broughton","40290",44.0,"F","married","employee",28.0,"NO","NO","YES","NO",3374.79,201737,"SAVINGS","liedahl@msn.com",NULL,"385-555-5567","603-555-3180","BG","904-555-8114"
++"1738","Gene C Gregory","DO NOT MAILKenilworth","34688-5000",66.0,"F","married","worker",23.0,"NO","NO","NO","YES",29602.00,101738,"CHECKING","gregory@web.de","6520224090045455","406-555-1780",NULL,"PK","208-555-8645"
++"1739","Fergus V Fiore","3-5 swallow placwMayfairLondon","59930-0100",59.0,"M","single","farmer",23.0,"YES","NO","NO","YES",40225.00,101739,"CHECKING","ffiore@msl.org","30011111111119","307-555-2864","517-555-4312","PR","417-555-7930"
++"1740","Jayne L Severson","HestonHounslow","34230",19.0,"M","single","inactive",5.0,"YES","YES","NO","YES",-7.29,101740,"CHECKING","jayne.severson@gmx.net","30411111111111","517-555-5465",NULL,"LR","401-555-5197"
++"1740","Jayne L Severson","HestonHounslow","34230",19.0,"M","single","inactive",5.0,"YES","YES","NO","YES",-19.71,201740,"SAVINGS","jayne.severson@gmx.net","30411111111111","517-555-5465",NULL,"LR","401-555-5197"
++"1741","Mortique X Parente","Perry BarrBirmingham","77619",52.0,"F","married","inactive",4.0,"NO","YES","NO","YES",1298.00,101741,"CHECKING",NULL,"5520111111111121","307-555-534","808-555-5093","IE","317-555-2244"
++"1742","Sherwood O Gray","Arlington Way Sundorne RetailShrewsbury","30076-3738",50.0,"F","married","worker",23.0,"NO","NO","YES","NO",33409.00,101742,"SAVINGS","sherwood_g@icloud.com","30111111161229","717-555-5545",NULL,"FI","sherwood_gray@msl.org"
++"1743","Mai M Reider","MiddletonManchester","33811-1293",64.0,"F","married","inactive",13.0,"YES","NO","YES","NO",3452.49,101743,"SAVINGS","reider@msn.com","4024-0071-2159-5481","808-555-5028","804-555-51","SZ","515-555-9021"
++"1743","Mai M Reider","MiddletonManchester","33811-1293",64.0,"F","married","inactive",13.0,"YES","NO","YES","NO",9334.51,201743,"CHECKING","reider@msn.com","4024-0071-2159-5481","808-555-5028","804-555-51","SZ","515-555-9021"
++"1744","Newton C Dodson","Halesfield.2Shropshire","77092-7028",76.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",12074.00,101744,"SAVINGS","newton_dodson@blue.com","3530 1113 3330 0000","603-555-9296","515-555-8779","UM",NULL
++"1745","Mac W Barchi","GREENHILL LANEDERBYSHIRE","77016-2723",59.0,"F","married","farmer",35.0,"YES","NO","YES","NO",25346.00,101745,"SAVINGS","barchi@yahoo.com","5580977968891503","804-555-1448",NULL,"TR","512-555-1957"
++"1746","Rosmira V Barrett","---------- BEDLINGTON NORTHUMBERLAND ENGLAND","33687-6579",74.0,"M","married","pensioner",26.0,"NO","NO","YES","YES",5516.37,101746,"SAVINGS","barrett@ibm.com","36111111111111","502-555-1908",NULL,"LY","404-555-1049"
++"1746","Rosmira V Barrett","---------- BEDLINGTON NORTHUMBERLAND ENGLAND","33687-6579",74.0,"M","married","pensioner",26.0,"NO","NO","YES","YES",14914.63,201746,"CHECKING","barrett@ibm.com","36111111111111","502-555-1908",NULL,"LY","404-555-1049"
++"1747","Georges R Donahue","Great BarrBirmingham","27983-7491",66.0,"F","widowed","pensioner",25.0,"YES","NO","NO","YES",4228.00,101747,"SAVINGS","georges_d@icloud.com","5169 7990 9185 4334","501-555-2606","303-555-2225","PG","609-555-5314"
++"1748","Morris G Alvares","Leabrook RoadWEST MIDLANDS","78003-4207",41.0,"M","single","employee",15.0,"NO","YES","NO","YES",61.00,101748,"SAVINGS","morris.alvares@gmx.net","213130951856200","615-555-2666",NULL,"CH","603-555-9049"
++"1749","Gayle D Shaner","Nottingham RoadBelper","85749-9359",61.0,"M","married","worker",20.0,"NO","NO","YES","YES",6542.91,101749,"SAVINGS","shaner@yahoo.com","36111111111111","505-555-6625",NULL,"MQ","404-555-7271"
++"1749","Gayle D Shaner","Nottingham RoadBelper","85749-9359",61.0,"M","married","worker",20.0,"NO","NO","YES","YES",17690.09,201749,"CHECKING","shaner@yahoo.com","36111111111111","505-555-6625",NULL,"MQ","404-555-7271"
++"1750","Delmer F Scronce","MinworthSutton Coldfield","85202-1044",25.0,"F","single","inactive",4.0,"NO","NO","NO","YES",1170.00,101750,"SAVINGS","delmer.scronce@icloud.com","30310111161029","401-555-3167","602-555-9384","NG","405-555-4835"
++"1751","Ronna N Risley","Parkeston QuayHarwich","45840-9780",20.0,"F","single","inactive",3.0,"NO","NO","NO","YES",4860.00,101751,"SAVINGS","ronna_r@icloud.com","5285696282092972","515-555-5377","503-555-3718","LK","860-555-4583"
++"1752","Rona G Schissel","ErdingtonBirmingham","59806-0327",45.0,"M","married","worker",20.0,"NO","YES","YES","YES",187.38,101752,"CHECKING","schissel@gmx.net","340000000000009","503-555-1645","517-555-1485","LY","307-555-6385"
++"1752","Rona G Schissel","ErdingtonBirmingham","59806-0327",45.0,"M","married","worker",20.0,"NO","YES","YES","YES",506.62,201752,"SAVINGS","schissel@gmx.net","340000000000009","503-555-1645","517-555-1485","LY","307-555-6385"
++"1753","Noreen A Rivero","Barkby RoadLeicester","32315",51.0,"M","married","employee",20.0,"NO","NO","YES","NO",37743.00,101753,"CHECKING","noreen_rivero@yahoo.com","30111111161229",NULL,NULL,"CY","303-555-8591"
++"1755","Loujenia H Agen","GrennockInverclyde","30353-0416",40.0,"M","married","employee",11.0,"NO","NO","YES","NO",391.50,101755,"CHECKING","loujenia_agen@blue.com","30310111161029","512-555-5037",NULL,"YE","loujenia_a@icloud.com"
++"1755","Loujenia H Agen","GrennockInverclyde","30353-0416",40.0,"M","married","employee",11.0,"NO","NO","YES","NO",1058.50,201755,"SAVINGS","loujenia_agen@blue.com","30310111161029","512-555-5037",NULL,"YE","loujenia_a@icloud.com"
++"1756","Suzette U Karamanian","Bellbrook ParkUckfield","32409-1695",71.0,"M","married","pensioner",32.0,"YES","NO","YES","NO",37303.00,101756,"CHECKING","suzettekaramanian@t-online.de","180030951856201","804-555-2992",NULL,"PY","401-555-3361"
++"1757","Zana Y Whitman","105 Devonshire Road 105 Devonshire Road London","77092-7028",65.0,"M","widowed","pensioner",26.0,"NO","NO","NO","YES",97837.00,101757,"CHECKING","zwhitman@ccdef.net",NULL,"907-555-8587",NULL,"BY","402-555-8072"
++"1758","Sal Y Sinkler","Hall GreenBirmingham","29180",74.0,"F","married","pensioner",24.0,"YES","NO","YES","NO",17977.95,101758,"CHECKING","sal.sinkler@ccdef.net",NULL,"307-555-4278",NULL,"GM","sal.sinkler@msn.com"
++"1758","Sal Y Sinkler","Hall GreenBirmingham","29180",74.0,"F","married","pensioner",24.0,"YES","NO","YES","NO",48607.04,201758,"SAVINGS","sal.sinkler@ccdef.net",NULL,"307-555-4278",NULL,"GM","sal.sinkler@msn.com"
++"1759","Tibor H Evernham","Chandler`s FordEastleigh","75381-9060",70.0,"M","married","inactive",22.0,"YES","NO","YES","NO",12689.00,101759,"CHECKING","tevernham@yahoo.com","180030951856201","502-555-8135",NULL,"TK","tibor_e@cicn.gov"
++"1760","Cara T Hughes","Industrial Est.Witney","27263-3163",66.0,"F","married","inactive",26.0,"NO","NO","YES","NO",40240.00,101760,"CHECKING","cara_hughes@ccdef.net","180030951856201","701-555-1931","573-555-4239","EG","802-555-4282"
++"1761","Sammye N Munsch","----------NEWCASTLE UPON TYNE","29020-9557",44.0,"M","widowed","worker",18.0,"YES","NO","NO","NO",11303.01,101761,"CHECKING","munsch@gmx.net","5169 7990 9185 4334","401-555-3587",NULL,"KE","515-555-2339"
++"1761","Sammye N Munsch","----------NEWCASTLE UPON TYNE","29020-9557",44.0,"M","widowed","worker",18.0,"YES","NO","NO","NO",30559.98,201761,"SAVINGS","munsch@gmx.net","5169 7990 9185 4334","401-555-3587",NULL,"KE","515-555-2339"
++"1762","Kaye F Worrell","BanburyOxon","32304",57.0,"F","married","farmer",20.0,"YES","NO","NO","YES",21691.00,101762,"CHECKING","worrell@gmail.com","30510011111111","302-555-5988","515-555-46","LI","808-555-2342"
++"1763","Violetta H Manion","110 KEW GREEN / KEW RICHMONDSURREY","77546-4856",22.0,"M","single","inactive",5.0,"YES","NO","NO","YES",1483.00,101763,"CHECKING","violetta_manion@yahoo.com","4146 6643 9004 5458","785-555-7623","617-555-2797","SB","505-555-8632"
++"1764","Harris D Leahy","22 HANOVER SQUARELONDON","34689-2798",78.0,"F","widowed","inactive",12.0,"YES","NO","NO","NO",6053.13,101764,"CHECKING","leahy@t-online.de","38111111111119","804-555-5994",NULL,"CH","406-555-5543"
++"1764","Harris D Leahy","22 HANOVER SQUARELONDON","34689-2798",78.0,"F","widowed","inactive",12.0,"YES","NO","NO","NO",16365.86,201764,"SAVINGS","leahy@t-online.de","38111111111119","804-555-5994",NULL,"CH","406-555-5543"
++"1765","Lin B Needleman","RedditchWorcester","86339",74.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",36433.00,101765,"CHECKING","lin_n@gmx.net","3528-3095-1856-2063","501-555-6265",NULL,"PE","614-555-8338"
++"1766","Carlos S Kirts","Leabrook RoadWEST MIDLANDS","87176",36.0,"F","married","employee",5.0,"NO","NO","YES","YES",5782.00,101766,"CHECKING","carloskirts@icloud.com","36111111111111","717-555-6307",NULL,"AU","701-555-5337"
++"1767","Hudson Y Connors","Bishops StortfordHertfordshire","85202",60.0,"M","married","farmer",32.0,"NO","NO","NO","YES",2329.02,101767,"CHECKING","hconnors@msn.com","4024007121595481","401-555-5175",NULL,"SV","hudson_connors@ccdef.net"
++"1767","Hudson Y Connors","Bishops StortfordHertfordshire","85202",60.0,"M","married","farmer",32.0,"NO","NO","NO","YES",6296.98,201767,"SAVINGS","hconnors@msn.com","4024007121595481","401-555-5175",NULL,"SV","hudson_connors@ccdef.net"
++"1768","Roxie W Woolley","1 CHURCH ROWKENT","77080",53.0,"F","divorced","inactive",11.0,"YES","NO","YES","NO",16178.00,101768,"CHECKING","rwoolley@web.de","3400 000000 00009","717-555-8575","512-555-4065","CV","603-555-497"
++"1769","Bryon J Ghrist","AllingtonMaidstone","85206-3481",26.0,"F","married","intermediate professions",0.0,"NO","NO","YES","NO",9784.00,101769,"CHECKING","ghrist@de.ibm.com","5169-7990-9185-4334","505-555-1974",NULL,"BN","401-555-8962"
++"1770","Edith N Davis","COLNBROOKSLOUGH","77092-7028",23.0,"F","single","inactive",3.0,"NO","YES","NO","YES",329.94,101770,"CHECKING","edith_davis@aol.com","5520111111111121","609-555-4454",NULL,"CU","davis@ibm.com"
++"1770","Edith N Davis","COLNBROOKSLOUGH","77092-7028",23.0,"F","single","inactive",3.0,"NO","YES","NO","YES",892.06,201770,"SAVINGS","edith_davis@aol.com","5520111111111121","609-555-4454",NULL,"CU","davis@ibm.com"
++"1771","Jena-Lee G Diller","Holder RoadAldershot","74464-3603",63.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",28441.00,101771,"CHECKING","diller@t-online.de","30411111111111","651-555-3139",NULL,"AD","jena-lee_diller@icloud.com"
++"1772","Bonisace J Pacolt","BanburyOxon","25177",19.0,"F","single","inactive",1.0,"NO","NO","NO","YES",773.00,101772,"CHECKING","bonisace_p@ccdef.net","5462522444922689","404-555-2439","775-555-9452","WF","603-555-4181"
++"1773","Darrell N Thronson","FACTORY ROADUPTON - POOLE - DORS","27606-3386",71.0,"F","married","inactive",34.0,"YES","NO","YES","NO",3784.59,101773,"CHECKING","dthronson@yahoo.com","38111111111119","334-555-4224",NULL,"AE","225-555-300"
++"1773","Darrell N Thronson","FACTORY ROADUPTON - POOLE - DORS","27606-3386",71.0,"F","married","inactive",34.0,"YES","NO","YES","NO",10232.41,201773,"SAVINGS","dthronson@yahoo.com","38111111111119","334-555-4224",NULL,"AE","225-555-300"
++"1774","Lavern C Heim","224 Marsh HillBirmingham","27563",67.0,"M","married","farmer",26.0,"YES","NO","YES","NO",11253.00,101774,"CHECKING","lavern_h@yahoo.com","5580977968891503","602-555-4021","360-555-1578","GT","lheim@msl.org"
++"1775","Gennadi P Donk","CLITTAFORD RD SOUTHWAYPLYMOUTH","96540-2200",71.0,"F","married","pensioner",35.0,"YES","NO","YES","NO",41714.00,101775,"CHECKING","gdonk@yahoo.com","3528309518562063","802-555-38","904-555-4072","BY","405-555-9879"
++"1776","Mason X Agyurre","Berkswell RoadMeriden","75220-2314",73.0,"M","married","pensioner",35.0,"YES","NO","YES","YES",11262.78,101776,"CHECKING","agyurre@web.de","30510011111111","802-555-4022",NULL,"IN","401-555-4873"
++"1776","Mason X Agyurre","Berkswell RoadMeriden","75220-2314",73.0,"M","married","pensioner",35.0,"YES","NO","YES","YES",30451.21,201776,"SAVINGS","agyurre@web.de","30510011111111","802-555-4022",NULL,"IN","401-555-4873"
++"1777","Maithreyi V Dobson","2 Cornwall StreetBirmingham","30083",72.0,"F","widowed","pensioner",21.0,"YES","NO","NO","YES",69224.00,101777,"SAVINGS","maithreyidobson@msl.org","4146 6643 9004 5458","651-555-8876","609-555-7327","FX","405-555-7739"
++"1778","Alice C Ferri","----------NEWCASTLE UPON TYNE","32611-5900",68.0,"M","married","pensioner",40.0,"YES","NO","YES","NO",36777.00,101778,"SAVINGS","alice_ferri@de.ibm.com","180030951856201","615-555-8569","603-555-4422","CN",NULL
++"1779","Joann C Crocker","GASGOIGNE ROADESSEX","32347-9805",63.0,"F","married","inactive",3.0,"NO","NO","NO","NO",10398.24,101779,"SAVINGS","joann_crocker@gmx.net","5169799091854334","804-555-7230","401-555-4915","DZ","573-555-8633"
++"1779","Joann C Crocker","GASGOIGNE ROADESSEX","32347-9805",63.0,"F","married","inactive",3.0,"NO","NO","NO","NO",28113.76,201779,"CHECKING","joann_crocker@gmx.net","5169799091854334","804-555-7230","401-555-4915","DZ","573-555-8633"
++"1780","Sarah E Foster","WEST WORLD WESTGATELONDON","79936-5916",86.0,"M","married","pensioner",19.0,"YES","NO","YES","NO",7722.00,101780,"SAVINGS","sarah_f@t-online.de","3528-3095-1856-2063","605-555-6092",NULL,"MC","405-555-8931"
++"1781","Enrique B Rudlang","56 Springfield RoadBirmingham","66201",40.0,"F","married","executives,self-employed",0.0,"NO","NO","YES","YES",218.00,101781,"SAVINGS","erudlang@cicn.gov","5169 7990 9185 4334","808-555-1151",NULL,"PG","603-555-7636"
++"1782","Hyong E Ruddell","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","30306-2329",25.0,"M","single","inactive",7.0,"NO","YES","NO","YES",267.30,101782,"SAVINGS","hruddell@cicn.gov","30310111161029","515-555-89",NULL,"ET","614-555-6871"
++"1782","Hyong E Ruddell","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","30306-2329",25.0,"M","single","inactive",7.0,"NO","YES","NO","YES",722.69,201782,"CHECKING","hruddell@cicn.gov","30310111161029","515-555-89",NULL,"ET","614-555-6871"
++"1783","Marlin M Greenia","Spondon Spondon Derby","77092-7028",38.0,"M","cohabitant","worker",0.0,"NO","NO","YES","NO",52.00,101783,"SAVINGS","mgreenia@yahoo.com","340000000000009","302-555-5381",NULL,"NF",NULL
++"1784","Amos G Czosnyka","WORLE INDUSTRIAL CENTREAVON","28110",56.0,"F","married","craftsmen, storekeepers",4.0,"NO","YES","NO","YES",-2976.00,101784,"SAVINGS",NULL,"3400-000000-00009","573-555-1373","517-555-930","EE",NULL
++"1785","Lynnette S Goldsberry","Aston Triangle Aston Triangle Birmingham","No ZIP",75.0,"F","widowed","inactive",31.0,"YES","NO","NO","NO",3308.31,101785,"SAVINGS","lynnette_goldsberry@msn.com","5285696282092972","502-555-7266",NULL,"CY","334-555-3426"
++"1785","Lynnette S Goldsberry","Aston Triangle Aston Triangle Birmingham","No ZIP",75.0,"F","widowed","inactive",31.0,"YES","NO","NO","NO",8944.69,201785,"CHECKING","lynnette_goldsberry@msn.com","5285696282092972","502-555-7266",NULL,"CY","334-555-3426"
++"1786","Garland E Kimmel","WimborneDorset","90030-0457",40.0,"F","married","craftsmen, storekeepers",22.0,"NO","NO","YES","NO",3423.00,101786,"SAVINGS","garland_k@yahoo.com","340000000000009","208-555-2404",NULL,"RU","775-555-7605"
++"1787","Gilbert H Corona","Clayton Road ATTN AMY WATSONCP651 Hayes","85016-7929",73.0,"F","widowed","inactive",21.0,"YES","NO","NO","YES",15718.00,101787,"SAVINGS","gcorona@web.de","5169799091854334","401-555-7320","317-555-2725","US","gilbert_c@de.ibm.com"
++"1788","Clifford F Holsinger","37 MADDOX STREETLONDON","79821",66.0,"M","married","pensioner",24.0,"YES","NO","YES","YES",16726.77,101788,"SAVINGS","clifford_holsinger@msl.org","30210111161229","385-555-3793","603-555-1143","PK","clifford.holsinger@blue.com"
++"1788","Clifford F Holsinger","37 MADDOX STREETLONDON","79821",66.0,"M","married","pensioner",24.0,"YES","NO","YES","YES",45224.22,201788,"CHECKING","clifford_holsinger@msl.org","30210111161229","385-555-3793","603-555-1143","PK","clifford.holsinger@blue.com"
++"1789","Avery K Weidman","Beetons Way  Burv St  EdmondsSUFFOLK","66215",67.0,"M","married","pensioner",26.0,"YES","NO","YES","YES",21421.00,101789,"SAVINGS","weidman@ibm.com","5169 7990 9185 4334","843-555-8414",NULL,"FR","502-555-7286"
++"1790","Fannie M Graves","GarstangPreston","99801-1700",22.0,"F","single","inactive",2.0,"NO","YES","NO","YES",676.00,101790,"CHECKING","fgraves@cicn.gov","4146 6643 9004 5458","501-555-6066",NULL,"TN","fanniegraves@t-online.de"
++"1791","Ole D Balboni","SmallfieldHorley","34109-6228",68.0,"F","married","inactive",16.0,"NO","NO","YES","NO",729.27,101791,"CHECKING","ole.balboni@web.de","30111111161229","860-555-7139",NULL,"DK","615-555-2286"
++"1791","Ole D Balboni","SmallfieldHorley","34109-6228",68.0,"F","married","inactive",16.0,"NO","NO","YES","NO",1971.73,201791,"SAVINGS","ole.balboni@web.de","30111111161229","860-555-7139",NULL,"DK","615-555-2286"
++"1792","Hamid X Doorley","257 Great Lister StreetBirmingham","78744-1147",80.0,"F","widowed","pensioner",39.0,"YES","NO","NO","YES",20049.00,101792,"CHECKING","hamid_doorley@cicn.gov","5169799091854334","402-555-2325","573-555-3890","PF","334-555-6627"
++"1793","Gwen L Russell","ALFRETON ROADDERBY","30907-2998",76.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",44929.00,101793,"CHECKING","gwen_r@cicn.gov","4146664390045458","512-555-7803","717-555-6785","MD","503-555-704"
++"1794","Loyd G Westerberg","Great Western WaySwindon","79821",26.0,"M","single","inactive",8.0,"NO","NO","NO","YES",1375.11,101794,"CHECKING","loyd_w@de.ibm.com","3530 1113 3330 0000","302-555-6201",NULL,"MQ","401-555-8803"
++"1794","Loyd G Westerberg","Great Western WaySwindon","79821",26.0,"M","single","inactive",8.0,"NO","NO","NO","YES",3717.89,201794,"SAVINGS","loyd_w@de.ibm.com","3530 1113 3330 0000","302-555-6201",NULL,"MQ","401-555-8803"
++"1795","Armond I Demoss","Edinburgh WayHarlow","78934-4946",73.0,"M","married","pensioner",25.0,"YES","NO","YES","YES",24634.00,101795,"CHECKING","ademoss@yahoo.com","30411111111111","302-555-5782",NULL,"FJ","ademoss@ibm.com"
++"1796","Jacquelynn I Haggard","90 SUMMER LANEBIRMINGHAM","28236",27.0,"F","single","intermediate professions",18.0,"NO","NO","NO","YES",58654.00,101796,"CHECKING","jacquelynnhaggard@yahoo.com","6520224090045455","615-555-3377",NULL,"MZ","808-555-4278"
++"1797","Suzanne G King","CROMWELL ROADCAMBS.","No ZIP",70.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",15940.53,101797,"CHECKING","suzanne_king@ccdef.net","5285696282092972","517-555-5567","208-555-9082","IN","919-555-8181"
++"1797","Suzanne G King","CROMWELL ROADCAMBS.","No ZIP",70.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",43098.47,201797,"SAVINGS","suzanne_king@ccdef.net","5285696282092972","517-555-5567","208-555-9082","IN","919-555-8181"
++"1798","Patry Q Yates","Little Aston RoadAldridge","77327-9333",64.0,"F","married","pensioner",22.0,"NO","NO","YES","NO",21549.00,101798,"SAVINGS","patry_yates@cicn.gov","36111111111111","907-555-818","502-555-7637","RE","207-555-9323"
++"1799","Nicole U Christlieb","257 Great Lister StreetBirmingham","99224-8480",63.0,"F","married","inactive",35.0,"YES","NO","YES","NO",39763.00,101799,"SAVINGS","nchristlieb@icloud.com","5580977968891503","775-555-4173","907-555-8754","TR","402-555-172"
++"1800","Calvo S Buchanan","FARNCOMBE ROADWEST SUSSEX","34684-1233",54.0,"M","single","intermediate professions",29.0,"NO","NO","NO","YES",727.11,101800,"SAVINGS","buchanan@icloud.com","5580977968891503","916-555-71",NULL,"CO","calvo_buchanan@icloud.com"
++"1800","Calvo S Buchanan","FARNCOMBE ROADWEST SUSSEX","34684-1233",54.0,"M","single","intermediate professions",29.0,"NO","NO","NO","YES",1965.88,201800,"CHECKING","buchanan@icloud.com","5580977968891503","916-555-71",NULL,"CO","calvo_buchanan@icloud.com"
++"1801","Shanna K Ajamie","MinworthSutton Coldfield","30052",32.0,"M","married","intermediate professions",10.0,"NO","NO","YES","NO",-1457.00,101801,"SAVINGS","ajamie@cicn.gov","3400-000000-00009","517-555-3281","573-555-3410","GU",NULL
++"1802","Joan Z Foley","StirchleyBirmingham","85233",24.0,"M","single","inactive",4.0,"NO","YES","NO","YES",3420.00,101802,"SAVINGS","foley@aol.com","5580977968891503","573-555-9245","207-555-2396","AM","907-555-7734"
++"1803","Pierre V Payne","6/8 HIGH STREETCHESHIRE","33831-0928",80.0,"M","married","pensioner",45.0,"NO","NO","YES","YES",4879.71,101803,"SAVINGS","ppayne@web.de","3400-000000-00009","601-555-3096",NULL,"NA","pierre_payne@msn.com"
++"1803","Pierre V Payne","6/8 HIGH STREETCHESHIRE","33831-0928",80.0,"M","married","pensioner",45.0,"NO","NO","YES","YES",13193.28,201803,"CHECKING","ppayne@web.de","3400-000000-00009","601-555-3096",NULL,"NA","pierre_payne@msn.com"
++"1804","Wes A Gardeline","SIR FRANCIS LEY IND. PARKDERBY","90640",82.0,"F","widowed","inactive",43.0,"YES","NO","YES","YES",25970.00,101804,"SAVINGS","wes_gardeline@ibm.com","378282246310005","717-555-6760","512-555-8063","SZ","614-555-7736"
++"1805","Lynn H Tanaka","WittonBIRMINGHAM","88268",58.0,"M","married","farmer",26.0,"YES","NO","YES","NO",99998.00,101805,"SAVINGS","lynn_t@msl.org","5423111111111111","785-555-1531","785-555-6794","CR","808-555-4238"
++"1806","Regean N Kaupp","AbergavennyGwent","38478-4711",78.0,"F","widowed","pensioner",31.0,"YES","NO","NO","NO",8233.92,101806,"SAVINGS","regean_k@yahoo.com","5169-7990-9185-4334","401-555-2","775-555-8523","GH","916-555-7997"
++"1806","Regean N Kaupp","AbergavennyGwent","38478-4711",78.0,"F","widowed","pensioner",31.0,"YES","NO","NO","NO",22262.07,201806,"CHECKING","regean_k@yahoo.com","5169-7990-9185-4334","401-555-2","775-555-8523","GH","916-555-7997"
++"1807","Petr K Bell","Frederick RoadBirmingham","34747",72.0,"F","married","inactive",30.0,"YES","NO","YES","NO",5257.00,101807,"CHECKING","petr_bell@ibm.com","6220264390045758","614-555-3116","517-555-3626","VU","petr.bell@ccdef.net"
++"1808","Collis I West","HockleyBirmingham","98270",82.0,"F","divorced","inactive",30.0,"YES","NO","YES","YES",5257.00,101808,"CHECKING","colliswest@blue.com","38111111111119","502-555-869","907-555-8700","QA","651-555-3923"
++"1809","Liam X Newsom","Rockingham RoadLeicester","77474-9337",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","NO",5188.59,101809,"CHECKING","liam_n@ccdef.net","5285696282092972","417-555-1255","406-555-6683","UZ","liamnewsom@web.de"
++"1809","Liam X Newsom","Rockingham RoadLeicester","77474-9337",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","NO",14028.41,201809,"SAVINGS","liam_n@ccdef.net","5285696282092972","417-555-1255","406-555-6683","UZ","liamnewsom@web.de"
++"1810","Marge J Dechenne","Rockingham RoadLeicester","33026-3969",40.0,"F","married","employee",19.0,"NO","YES","YES","NO",35410.00,101810,"CHECKING","mdechenne@gmx.net","5520111111111121","651-555-910","401-555-5320","GF","573-555-8148"
++"1811","Gerry W Knipp","Bishops StortfordHertfordshire","32611",85.0,"F","widowed","inactive",12.0,"NO","NO","NO","NO",10041.00,101811,"CHECKING","gerry_k@web.de","3400-000000-00009","517-555-9367","609-555-8923","UZ","317-555-8895"
++"1812","Cory R Oas","CLARKE STREETDERBY","92121-2990",77.0,"F","married","inactive",19.0,"YES","NO","YES","YES",8557.11,101812,"CHECKING","oas@t-online.de","30310111161029","515-555-8251",NULL,"CA","608-555-5253"
++"1812","Cory R Oas","CLARKE STREETDERBY","92121-2990",77.0,"F","married","inactive",19.0,"YES","NO","YES","YES",23135.89,201812,"SAVINGS","oas@t-online.de","30310111161029","515-555-8251",NULL,"CA","608-555-5253"
++"1813","Theresia J Lian","----------CHICHESTER WEST SUSS","29602",20.0,"F","single","inactive",5.0,"NO","NO","NO","YES",1108.00,101813,"CHECKING","tlian@msl.org","5462522444922689","404-555-1195","402-555-3704","UA","402-555-8027"
++"1814","Ronald B Mcguinness","HorsforthLeeds","27629",73.0,"F","married","inactive",13.0,"YES","NO","YES","NO",57871.00,101814,"CHECKING","ronaldmcguinness@blue.com","213130951856200","405-555-8386","843-555-165","MS","302-555-162"
++"1815","Shaaban F Vasavada","Park Farm Industrial EstateWellingborough","33332",72.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",3863.16,101815,"CHECKING","shaaban.vasavada@web.de","4024 0071 2159 5481","701-555-4841","802-555-831","MR","shaaban.vasavada@gmx.net"
++"1815","Shaaban F Vasavada","Park Farm Industrial EstateWellingborough","33332",72.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",10444.84,201815,"SAVINGS","shaaban.vasavada@web.de","4024 0071 2159 5481","701-555-4841","802-555-831","MR","shaaban.vasavada@gmx.net"
++"1816","Cleve D Bridwell","Western Industrial EstateCaerphilly","28405-4161",60.0,"F","married","farmer",23.0,"YES","NO","YES","YES",28111.00,101816,"CHECKING","cleve.bridwell@t-online.de","6011567891012132","843-555-6576",NULL,"KN","cleve_b@msl.org"
++"1817","Karen N Lien","DARTMOUTH ROADWEST MIDLANDS","86335-0922",75.0,"F","widowed","pensioner",22.0,"NO","NO","NO","YES",294296.00,101817,"CHECKING","klien@msn.com","30011111111119","404-555-1820",NULL,"CR","609-555-7079"
++"1818","Felip P Mcdavid","Parkeston QuayHarwich","32407",77.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",7055.64,101818,"CHECKING","felip_mcdavid@msn.com","5423111111111111","601-555-1738",NULL,"VC","775-555-2294"
++"1818","Felip P Mcdavid","Parkeston QuayHarwich","32407",77.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",19076.36,201818,"SAVINGS","felip_mcdavid@msn.com","5423111111111111","601-555-1738",NULL,"VC","775-555-2294"
++"1819","Conley O Wright","Barton BlountChurch Broughton","98506-2857",75.0,"F","married","inactive",1.0,"YES","NO","YES","NO",11008.00,101819,"CHECKING","conley_w@gmx.net","5169 7990 9185 4334","307-555-8162",NULL,"DO","conley.wright@ccdef.net"
++"1820","Willy S Besade","3-5 SWALLOW PLACELondon","87505",76.0,"F","married","inactive",5.0,"NO","NO","NO","YES",47855.00,101820,"CHECKING","willy_besade@gmx.net",NULL,"843-555-4383",NULL,"CF","512-555-8361"
++"1821","Ferrell Q Downey","Perry BarrBirmingham","36203",21.0,"M","single","inactive",4.0,"NO","YES","NO","YES",800.55,101821,"SAVINGS","ferrell_d@t-online.de","30210111161229","803-555-9452",NULL,"CK","775-555-7721"
++"1821","Ferrell Q Downey","Perry BarrBirmingham","36203",21.0,"M","single","inactive",4.0,"NO","YES","NO","YES",2164.45,201821,"CHECKING","ferrell_d@t-online.de","30210111161229","803-555-9452",NULL,"CK","775-555-7721"
++"1822","Spencer T Lytle","Node CourtCodicote","77340-6499",67.0,"F","widowed","inactive",4.0,"NO","NO","NO","YES",26761.00,101822,"SAVINGS","lytle@yahoo.com","4146-6643-9004-5458","512-555-1706",NULL,"SZ","608-555-6379"
++"1823","Wyman V Reichelman","NorthgateAldridge","27615",60.0,"F","widowed","worker",17.0,"NO","NO","NO","NO",16834.00,101823,"SAVINGS","wyman_r@blue.com","30011111111119","515-555-4401","602-555-7551","IS","505-555-4363"
++"1824","Danese P Rose","Industrial Est.Witney","34744",58.0,"F","married","farmer",28.0,"YES","NO","YES","NO",11704.23,101824,"SAVINGS","drose@de.ibm.com","5169799091854334","401-555-1006","515-555-7245","PA","385-555-6435"
++"1824","Danese P Rose","Industrial Est.Witney","34744",58.0,"F","married","farmer",28.0,"YES","NO","YES","NO",31644.77,201824,"CHECKING","drose@de.ibm.com","5169799091854334","401-555-1006","515-555-7245","PA","385-555-6435"
++"1825","Gaylord P Huot","105 Devonshire RoadLondon","43213",54.0,"M","married","worker",30.0,"NO","NO","YES","YES",1575.00,101825,"CHECKING","gaylord_huot@gmx.net","4024007121595481","573-555-4055","402-555-7935","YT","406-555-7744"
++"1826","Reuben A Ramos","Canal RoadLeeds","85011",24.0,"M","single","executives,self-employed",4.0,"NO","YES","NO","YES",2436.00,101826,"CHECKING","reuben_r@web.de","3528-3095-1856-2063","417-555-329","208-555-4679","SZ",NULL
++"1827","Earl Z Bryant","173 Friar Street 173 Friar Street Reading","86405-3356",41.0,"M","married","worker",22.0,"NO","NO","NO","NO",774.36,101827,"CHECKING","earl_bryant@aol.com","6220264390045758","401-555-2381",NULL,"VC","775-555-3345"
++"1827","Earl Z Bryant","173 Friar Street 173 Friar Street Reading","86405-3356",41.0,"M","married","worker",22.0,"NO","NO","NO","NO",2093.64,201827,"SAVINGS","earl_bryant@aol.com","6220264390045758","401-555-2381",NULL,"VC","775-555-3345"
++"1828","Tory H Turley","AIRPORT WAY LUTONBEDFORDSHIRE LU2 9NI","86322-4241",40.0,"F","married","employee",15.0,"NO","YES","YES","NO",1761.00,101828,"CHECKING","turley@aol.com","5285696282092972","518-555-9977",NULL,"PW","tturley@gmail.com"
++"1829","Jamie L Fink","North LaneAldershot","87500",44.0,"F","married","farmer",23.0,"NO","NO","YES","NO",18701.00,101829,"CHECKING","jamie_f@aol.com",NULL,"317-555-876",NULL,"NP","501-555-770"
++"1830","Ivars T Rozsa","60 FREDERICK STREETBIRMINGHAM","34741",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","YES",3200.85,101830,"SAVINGS","rozsa@blue.com","6011567891012132","202-555-3047",NULL,"BR","irozsa@msl.org"
++"1830","Ivars T Rozsa","60 FREDERICK STREETBIRMINGHAM","34741",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","YES",8654.15,201830,"CHECKING","rozsa@blue.com","6011567891012132","202-555-3047",NULL,"BR","irozsa@msl.org"
++"1831","Tiffany J Pierson","UNIT 33 IMEX BUSINESS PARKBIRMINGHAM","27604-3754",77.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",65400.00,101831,"SAVINGS","tiffany_p@cicn.gov","3400-000000-00009","501-555-7786",NULL,"PW","907-555-7"
++"1832","Cindi Q Burns","178/188 Great South West RoadHounslow","30013",73.0,"F","married","inactive",26.0,"YES","NO","YES","NO",4534.00,101832,"SAVINGS","burns@msl.org","38111111111119","615-555-1913","207-555-422","AU","burns@web.de"
++"1833","Jurij V Dreckman","3-5 SWALLOW PLACELondon","75212-4217",71.0,"F","married","inactive",1.0,"NO","NO","YES","NO",7915.59,101833,"SAVINGS","jdreckman@gmx.net","36111111111111","207-555-784",NULL,"SA","573-555-4772"
++"1833","Jurij V Dreckman","3-5 SWALLOW PLACELondon","75212-4217",71.0,"F","married","inactive",1.0,"NO","NO","YES","NO",21401.41,201833,"CHECKING","jdreckman@gmx.net","36111111111111","207-555-784",NULL,"SA","573-555-4772"
++"1834","Gail C Meredith","Lower EarlyReading","33637-6744",59.0,"F","married","intermediate professions",25.0,"NO","NO","NO","NO",-326.00,101834,"CHECKING","gmeredith@msl.org","5169-7990-9185-4334","517-555-594",NULL,"AQ","meredith@msl.org"
++"1835","Brantley C Araica","18-20 CROFTSBANK ROADMANCHESTER","77041-5361",38.0,"M","married","worker",20.0,"NO","YES","YES","NO",2073.00,101835,"CHECKING","brantley.araica@gmx.net","6520224090045455","302-555-9590","808-555-9604","MO","850-555-5761"
++"1836","Mamie L Matteson","Balliol Business Park East Balliol Business Park East Newcastle upon Tyne","33458",36.0,"F","single","employee",18.0,"NO","YES","NO","YES",110.43,101836,"CHECKING","matteson@aol.com","3528309518562063","302-555-4354",NULL,"LV","517-555-7991"
++"1836","Mamie L Matteson","Balliol Business Park East Balliol Business Park East Newcastle upon Tyne","33458",36.0,"F","single","employee",18.0,"NO","YES","NO","YES",298.57,201836,"SAVINGS","matteson@aol.com","3528309518562063","302-555-4354",NULL,"LV","517-555-7991"
++"1837","Gerard P Wendoloski","55 London RoadSt Albans","30353-0416",44.0,"M","single","employee",23.0,"NO","NO","NO","YES",12281.00,101837,"CHECKING","gerard_w@ibm.com","6220264390045758","651-555-1558",NULL,"KR","gerard_w@msn.com"
++"1838","Chet N Lacount","SpekeLiverpool","86426",50.0,"F","married","farmer",20.0,"NO","YES","YES","NO",7762.00,101838,"SAVINGS","clacount@yahoo.com","5520111111111121","505-555-7422","614-555-9032","JO","307-555-8357"
++"1839","Will Z Christensen","2 Purley WayCroydon","77039-3804",65.0,"M","married","pensioner",12.0,"NO","NO","YES","YES",3789.45,101839,"SAVINGS","willchristensen@web.de","4146664390045458","208-555-308",NULL,"RO","christensen@ccdef.net"
++"1839","Will Z Christensen","2 Purley WayCroydon","77039-3804",65.0,"M","married","pensioner",12.0,"NO","NO","YES","YES",10245.55,201839,"CHECKING","willchristensen@web.de","4146664390045458","208-555-308",NULL,"RO","christensen@ccdef.net"
++"1840","Nels F Le","SpekeLiverpool","32215",26.0,"M","single","inactive",11.0,"NO","NO","NO","YES",20476.00,101840,"SAVINGS","le@msl.org","30411111111111","602-555-7187","804-555-3101","GA","nle@blue.com"
++"1841","Linwood R Nicholes","DissNorfolk","28166-0485",39.0,"M","cohabitant","intermediate professions",15.0,"NO","NO","YES","NO",27208.00,101841,"SAVINGS","nicholes@t-online.de","30411111111111","518-555-5172","207-555-9118","GB","linwood_n@msn.com"
++"1842","Nathaniel O Arcadia","----------Kingswinford","90060-0036",80.0,"M","married","pensioner",42.0,"YES","NO","YES","NO",1732.59,101842,"CHECKING","nathaniel_a@t-online.de","213130951856200","603-555-499",NULL,"IO","207-555-176"
++"1842","Nathaniel O Arcadia","----------Kingswinford","90060-0036",80.0,"M","married","pensioner",42.0,"YES","NO","YES","NO",4684.41,201842,"SAVINGS","nathaniel_a@t-online.de","213130951856200","603-555-499",NULL,"IO","207-555-176"
++"1843","Marjosa T Antunes","STAND PARK / SHEFFIELD ROADCHESTERFIELD DERBY","77342-0099",47.0,"M","married","worker",25.0,"NO","NO","YES","NO",19627.00,101843,"CHECKING","marjosa_a@msn.com","30310111161029","303-555-1973",NULL,"CX","208-555-2700"
++"1844","Duncan Q Muldoon","120 VYSE STREETBIRMINGHAM","85011",79.0,"M","married","pensioner",48.0,"YES","NO","YES","NO",21710.00,101844,"CHECKING","duncan_m@t-online.de","3528-3095-1856-2063","802-555-9198",NULL,"FX","dmuldoon@t-online.de"
++"1845","Christian P Finnin","TamworthStaffs","75212-4217",62.0,"F","married","inactive",17.0,"NO","NO","YES","NO",3553.20,101845,"CHECKING","christianfinnin@icloud.com","38111111111119","503-555-6831","303-555-9346","KE","615-555-2147"
++"1845","Christian P Finnin","TamworthStaffs","75212-4217",62.0,"F","married","inactive",17.0,"NO","NO","YES","NO",9606.80,201845,"SAVINGS","christianfinnin@icloud.com","38111111111119","503-555-6831","303-555-9346","KE","615-555-2147"
++"1846","Sammye O Krants","Foundry CloseHorsham","32809-5500",61.0,"F","divorced","inactive",1.0,"NO","NO","NO","YES",11046.00,101846,"CHECKING",NULL,"4146 6643 9004 5458","775-555-8745","401-555-4134","AW","808-555-1584"
++"1847","Belinda X Christman","Tame RoadBirmingham","34266",37.0,"F","cohabitant","employee",3.0,"NO","NO","YES","YES",3802.00,101847,"SAVINGS","belindachristman@msn.com","30510011111111",NULL,NULL,"MX","916-555-4373"
++"1848","Christie M Artus","DroitwichWorcester","77041-5361",25.0,"M","single","worker",5.0,"NO","YES","NO","YES",285.12,101848,"SAVINGS","christie.artus@de.ibm.com","6220264390045758","701-555-9430","843-555-9674","BI","334-555-4440"
++"1848","Christie M Artus","DroitwichWorcester","77041-5361",25.0,"M","single","worker",5.0,"NO","YES","NO","YES",770.88,201848,"CHECKING","christie.artus@de.ibm.com","6220264390045758","701-555-9430","843-555-9674","BI","334-555-4440"
++"1849","Melinda W Aboytez","SMETHWICK WARLEYWEST MIDLANDS","77093-1894",39.0,"M","married","farmer",20.0,"NO","NO","YES","NO",1377.00,101849,"SAVINGS","melindaaboytez@de.ibm.com","30510011111111","501-555-9781",NULL,"EE","melinda_aboytez@ccdef.net"
++"1850","Rino H Lepper","Breeds PlaceHastings","85085",36.0,"F","single","inactive",17.0,"NO","NO","NO","NO",11125.00,101850,"SAVINGS","lepper@cicn.gov","4146664390045458","605-555-9774",NULL,"CU","512-555-3111"
++"1851","Margret H Anna","WittonBirmingham","32792-2237",19.0,"M","single","employee",3.0,"NO","YES","NO","YES",427.41,101851,"SAVINGS","anna@yahoo.com","4146664390045458","512-555-5064",NULL,"HN",NULL
++"1851","Margret H Anna","WittonBirmingham","32792-2237",19.0,"M","single","employee",3.0,"NO","YES","NO","YES",1155.59,201851,"CHECKING","anna@yahoo.com","4146664390045458","512-555-5064",NULL,"HN",NULL
++"1852","Cesarea C Cho","Trafalgar WayCamberley","28001",71.0,"F","widowed","pensioner",8.0,"NO","NO","NO","YES",1829.00,101852,"CHECKING","cho@web.de","30210111161229","502-555-3128",NULL,"SH","cho@cicn.gov"
++"1853","Michel B Spiegel","DissNorfolk","76548",75.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",36470.00,101853,"CHECKING","mspiegel@web.de","38111111111119","406-555-4137",NULL,"SN","919-555-7331"
++"1854","Mitchell V Redding","Castletown WaySunderland","87015-9739",68.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",4644.27,101854,"CHECKING","redding@t-online.de","5169-7990-9185-4334","803-555-2934","503-555-8790","SL","803-555-8894"
++"1854","Mitchell V Redding","Castletown WaySunderland","87015-9739",68.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",12556.73,201854,"SAVINGS","redding@t-online.de","5169-7990-9185-4334","803-555-2934","503-555-8790","SL","803-555-8894"
++"1855","Linden D Soforenko","ABERCRAVE CAERBONTSWANSEA","27419",50.0,"F","married","craftsmen, storekeepers",18.0,"NO","NO","YES","YES",5831.00,101855,"CHECKING","lsoforenko@msn.com","4146664390045458","573-555-1121","608-555-3147","RW","406-555-7393"
++"1856","Winnie K Baltzell","----------SWANSEA","29652",90.0,"M","widowed","pensioner",12.0,"NO","NO","YES","NO",63877.00,101856,"CHECKING","wbaltzell@icloud.com",NULL,"307-555-4208","518-555-3173","TK","winniebaltzell@msn.com"
++"1857","Carmel U Desmet","Little Aston RoadAldridge","28235",69.0,"M","married","pensioner",30.0,"NO","NO","YES","NO",19058.76,101857,"CHECKING","desmet@de.ibm.com","36111111111111","775-555-9962","803-555-7315","BO","517-555-7874"
++"1857","Carmel U Desmet","Little Aston RoadAldridge","28235",69.0,"M","married","pensioner",30.0,"NO","NO","YES","NO",51529.24,201857,"SAVINGS","desmet@de.ibm.com","36111111111111","775-555-9962","803-555-7315","BO","517-555-7874"
++"1858","Gisela W Brooks","ErdingtonBirmingham","59414-0528",50.0,"M","married","farmer",28.0,"YES","NO","YES","YES",35164.00,101858,"SAVINGS","brooks@web.de","4024 0071 2159 5481","602-555-8229",NULL,"MV","gisela_brooks@icloud.com"
++"1859","Lon Q Geoghegan","224 Marsh HillBirmingham","32809-5500",23.0,"M","single","inactive",5.0,"NO","YES","NO","YES",131.00,101859,"SAVINGS","lgeoghegan@msn.com","30411111111111","302-555-9840",NULL,"VA","615-555-9074"
++"1860","Jurij Q Shirley","Blucher StreetBirmingham","77305-1135",38.0,"M","single","intermediate professions",23.0,"NO","NO","NO","YES",7656.12,101860,"SAVINGS","jshirley@t-online.de","340000000000009","404-555-5591",NULL,"VU","617-555-7089"
++"1860","Jurij Q Shirley","Blucher StreetBirmingham","77305-1135",38.0,"M","single","intermediate professions",23.0,"NO","NO","NO","YES",20699.88,201860,"CHECKING","jshirley@t-online.de","340000000000009","404-555-5591",NULL,"VU","617-555-7089"
++"1861","Chelle E Garner","Sherbourne DriveTilbrook Milton Keyn","85011",46.0,"F","married","employee",23.0,"NO","NO","YES","NO",111.00,101861,"SAVINGS","chelle_garner@gmx.net","213130951856200","617-555-1784",NULL,"LA","512-555-750"
++"1862","Nina S Branham","6/8 HIGH STREETCHESHIRE","75185",47.0,"M","married","worker",23.0,"NO","YES","YES","YES",1044.00,101862,"CHECKING","nina.branham@msn.com",NULL,"608-555-8439","417-555-2229","HU","385-555-9611"
++"1863","Greta K Segura","Central Trading EstateStaines","30060-2358",51.0,"F","married","intermediate professions",9.0,"NO","YES","NO","NO",2263.68,101863,"CHECKING","greta_s@de.ibm.com","5169-7990-9185-4334","904-555-8628",NULL,"SH","greta_segura@msn.com"
++"1863","Greta K Segura","Central Trading EstateStaines","30060-2358",51.0,"F","married","intermediate professions",9.0,"NO","YES","NO","NO",6120.32,201863,"SAVINGS","greta_s@de.ibm.com","5169-7990-9185-4334","904-555-8628",NULL,"SH","greta_segura@msn.com"
++"1864","Sonia W Mcginnis","---------- RETURN TO LYNN CLAYPOOL HOUSTON","77042-6700",71.0,"F","widowed","inactive",18.0,"NO","NO","NO","YES",20231.00,101864,"CHECKING","mcginnis@t-online.de","4146664390045458","334-555-4316","401-555-1254","PN","sonia_mcginnis@gmx.net"
++"1865","Savannah Z Birk","----------Worthing","77041-5361",47.0,"M","single","farmer",4.0,"YES","NO","NO","NO",106796.00,101865,"CHECKING","savannah_birk@aol.com","5285696282092972","317-555-1103",NULL,"NP","225-555-2589"
++"1866","Staurt E Auger","Bellbrook ParkUckfield","29171-2239",62.0,"F","widowed","inactive",13.0,"NO","NO","NO","YES",9425.43,101866,"SAVINGS","auger@msl.org","3400-000000-00009","802-555-3649","307-555-5955","ES","406-555-21"
++"1866","Staurt E Auger","Bellbrook ParkUckfield","29171-2239",62.0,"F","widowed","inactive",13.0,"NO","NO","NO","YES",25483.57,201866,"CHECKING","auger@msl.org","3400-000000-00009","802-555-3649","307-555-5955","ES","406-555-21"
++"1867","Marcel E Hersch","10 Mordaunt RoadLondon","32839-2406",47.0,"M","married","intermediate professions",22.0,"NO","YES","YES","YES",4203.00,101867,"SAVINGS","marcel_h@web.de",NULL,"303-555-970",NULL,"FO","785-555-2458"
++"1868","Claude W Border","Winyates GreenRedditch","99518-2364",65.0,"F","married","pensioner",30.0,"YES","NO","YES","NO",29474.00,101868,"SAVINGS","border@blue.com","30510011111111","385-555-7747","505-555-9584","UG","225-555-692"
++"1869","Elsa Z Redeemar","Rehouse Industrial EstateAldridge","12754",62.0,"F","married","inactive",21.0,"NO","NO","YES","NO",1451.52,101869,"SAVINGS","elsa_redeemar@yahoo.com","38111111111119","615-555-6784","303-555-9400","SK","615-555-1005"
++"1869","Elsa Z Redeemar","Rehouse Industrial EstateAldridge","12754",62.0,"F","married","inactive",21.0,"NO","NO","YES","NO",3924.48,201869,"CHECKING","elsa_redeemar@yahoo.com","38111111111119","615-555-6784","303-555-9400","SK","615-555-1005"
++"1870","Lee K Blaker","Erdington Erdington Birmingham","34639",24.0,"F","single","inactive",3.0,"NO","YES","NO","YES",1442.00,101870,"SAVINGS","lblaker@web.de","3530 1113 3330 0000","775-555-7575",NULL,"CY",NULL
++"1871","Koganti U Barnes","West Point Business ParkAndover","85234-5783",82.0,"M","married","pensioner",27.0,"NO","YES","YES","NO",55122.00,101871,"CHECKING","barnes@ccdef.net","30210111161229","614-555-6316",NULL,"OM","kbarnes@blue.com"
++"1872","Lew R Newberry","RIVERSIDE WAYCAMBERLEY SURREY GU","86406-8150",69.0,"F","widowed","farmer",26.0,"NO","NO","YES","NO",5347.35,101872,"CHECKING","lew_newberry@t-online.de","4146 6643 9004 5458","609-555-8941","360-555-3874","CI","803-555-766"
++"1872","Lew R Newberry","RIVERSIDE WAYCAMBERLEY SURREY GU","86406-8150",69.0,"F","widowed","farmer",26.0,"NO","NO","YES","NO",14457.65,201872,"SAVINGS","lew_newberry@t-online.de","4146 6643 9004 5458","609-555-8941","360-555-3874","CI","803-555-766"
++"1873","Meir S Lewis","Parkeston QuayHarwich","87111-3601",19.0,"F","single","inactive",6.0,"NO","NO","NO","YES",19895.00,101873,"CHECKING","lewis@msn.com","4024007121595481","601-555-4923",NULL,"IE","505-555-4172"
++"1874","Paul Q Benz","Parkeston QuayHarwich","99169-1312",36.0,"M","married","farmer",18.0,"YES","NO","YES","NO",28831.00,101874,"CHECKING","paul_b@msn.com","36111111111111","573-555-669",NULL,"CM","860-555-2183"
++"1875","Clay G Molis","RedditchWorcester","27605-0601",39.0,"M","married","executives,self-employed",2.0,"NO","NO","YES","YES",7658.82,101875,"CHECKING","claymolis@aol.com","30210111161229","802-555-590",NULL,"GW","molis@yahoo.com"
++"1875","Clay G Molis","RedditchWorcester","27605-0601",39.0,"M","married","executives,self-employed",2.0,"NO","NO","YES","YES",20707.18,201875,"SAVINGS","claymolis@aol.com","30210111161229","802-555-590",NULL,"GW","molis@yahoo.com"
++"1876","Angie V Henning","WittonBirmingham","77041-5361",85.0,"F","widowed","inactive",24.0,"NO","NO","NO","YES",3052.00,101876,"CHECKING","ahenning@web.de","4024 0071 2159 5481","803-555-6074","916-555-3702","HR",NULL
++"1877","Rod H Walters","Barkby RoadLeicester","92799-1117",42.0,"F","married","employee",13.0,"NO","YES","YES","NO",-713.00,101877,"CHECKING","rwalters@yahoo.com","30011111111119","405-555-2925","860-555-7782","MV","804-555-3444"
++"1878","Lisa-Diane H Wight","Arlington Way Sundorne RetailShrewsbury","8086",35.0,"F","single","inactive",9.0,"NO","YES","NO","YES",136.89,101878,"CHECKING","wight@aol.com","5383908528354962","406-555-2473",NULL,"YT","603-555-7346"
++"1878","Lisa-Diane H Wight","Arlington Way Sundorne RetailShrewsbury","8086",35.0,"F","single","inactive",9.0,"NO","YES","NO","YES",370.11,201878,"SAVINGS","wight@aol.com","5383908528354962","406-555-2473",NULL,"YT","603-555-7346"
++"1879","Bizhan U Jacobus","Canal Road Canal Road Leeds","77087-4126",32.0,"M","divorced","worker",4.0,"NO","NO","NO","NO",2587.00,101879,"CHECKING","bizhan.jacobus@blue.com","5423111111111111","808-555-8766",NULL,"FK",NULL
++"1880","Walt J Brown","LOWER MIDDLETON STREETDERBYSHIRE","33710",67.0,"F","married","pensioner",28.0,"YES","NO","YES","NO",39500.00,101880,"CHECKING","brown@msn.com","3400 000000 00009","907-555-865","802-555-8355","JM","brown@cicn.gov"
++"1881","Lorretta Z Ard","Swiss CottageLondon","80104-3009",57.0,"M","married","craftsmen, storekeepers",22.0,"YES","NO","YES","NO",16964.91,101881,"SAVINGS","lorretta_ard@icloud.com","378282246310005","406-555-356",NULL,"AL","lorretta.ard@yahoo.com"
++"1881","Lorretta Z Ard","Swiss CottageLondon","80104-3009",57.0,"M","married","craftsmen, storekeepers",22.0,"YES","NO","YES","NO",45868.09,201881,"CHECKING","lorretta_ard@icloud.com","378282246310005","406-555-356",NULL,"AL","lorretta.ard@yahoo.com"
++"1882","Gino U Block","IDOTTSVAGEN 7SWEDEN","71134",75.0,"M","married","pensioner",2.0,"NO","NO","YES","YES",63397.00,101882,"SAVINGS","gblock@msl.org","5423111111111111","717-555-5871",NULL,"KG","919-555-4717"
++"1883","Bryon M Wheeldon","2 Tanners DriveMilton Keynes","85220-7002",67.0,"F","widowed","pensioner",15.0,"NO","NO","NO","NO",41921.00,101883,"SAVINGS","bryon.wheeldon@ccdef.net","4024007121595481","515-555-6215","406-555-3642","TJ","609-555-4110"
++"1884","Klaus I Trice","Witton P O BOX 660321 Birmingham","86404-2163",69.0,"M","married","pensioner",29.0,"NO","NO","YES","YES",8431.02,101884,"SAVINGS","ktrice@ibm.com","3528309518562063","515-555-3537","317-555-9500","BE","907-555-7606"
++"1884","Klaus I Trice","Witton P O BOX 660321 Birmingham","86404-2163",69.0,"M","married","pensioner",29.0,"NO","NO","YES","YES",22794.98,201884,"CHECKING","ktrice@ibm.com","3528309518562063","515-555-3537","317-555-9500","BE","907-555-7606"
++"1885","Wayne G Heltzel","TewkesburyGloucester","56502-1708",65.0,"F","single","employee",26.0,"NO","NO","NO","NO",5444.00,101885,"CHECKING",NULL,"5169 7990 9185 4334","843-555-3352",NULL,"BA","wayne_h@icloud.com"
++"1886","Almondo O Kurtz","Small HeathBirmingham","78642-6323",76.0,"M","married","pensioner",45.0,"NO","NO","YES","NO",32347.00,101886,"CHECKING","kurtz@gmx.net","5580977968891503","334-555-2083",NULL,"FO","615-555-3010"
++"1887","Dene F Hartwell","Barkby RoadLeicester","34475-5620",61.0,"F","married","inactive",21.0,"YES","NO","YES","NO",19445.67,101887,"CHECKING","dhartwell@msn.com","5383908528354962","717-555-8834","360-555-6582","FK","dene_hartwell@t-online.de"
++"1887","Dene F Hartwell","Barkby RoadLeicester","34475-5620",61.0,"F","married","inactive",21.0,"YES","NO","YES","NO",52575.33,201887,"SAVINGS","dhartwell@msn.com","5383908528354962","717-555-8834","360-555-6582","FK","dene_hartwell@t-online.de"
++"1888","Merion H Mccleery","Bellbrook ParkUckfield","56501-7002",67.0,"M","married","pensioner",19.0,"YES","NO","YES","YES",75194.00,101888,"CHECKING","merion.mccleery@blue.com",NULL,"608-555-8411",NULL,"PE","merion_mccleery@de.ibm.com"
++"1889","Marta W Cooley","Brickyard RoadWalsall","86004-3851",33.0,"M","single","worker",15.0,"YES","NO","NO","YES",11621.00,101889,"CHECKING","mcooley@web.de","5580977968891503","385-555-4985",NULL,"CZ","334-555-420"
++"1890","Viviana B Churchill","LAWFORD HEATH_IND. ESTATE RETURN TO AMY WATSON  CP651 RUGBY","34697-1348",58.0,"F","married","inactive",30.0,"NO","NO","YES","NO",1689.93,101890,"CHECKING","viviana.churchill@icloud.com","378282246310005","603-555-4856",NULL,"WF","303-555-1903"
++"1890","Viviana B Churchill","LAWFORD HEATH_IND. ESTATE RETURN TO AMY WATSON  CP651 RUGBY","34697-1348",58.0,"F","married","inactive",30.0,"NO","NO","YES","NO",4569.07,201890,"SAVINGS","viviana.churchill@icloud.com","378282246310005","603-555-4856",NULL,"WF","303-555-1903"
++"1891","Andrea K Aarestad","STAFFORD PARK 4SHROPSHIRE","32855-5433",32.0,"F","separated","employee",2.0,"NO","NO","NO","YES",1309.00,101891,"SAVINGS","andrea_aarestad@ibm.com","5169799091854334","603-555-8234",NULL,"TC","225-555-5233"
++"1892","Rexford A Yaupon","Castletown WaySunderland","86406-7339",20.0,"M","single","executives,self-employed",3.0,"NO","NO","NO","YES",14600.00,101892,"SAVINGS","rexford_yaupon@gmx.net","30011111111119","401-555-5678",NULL,"LR","919-555-875"
++"1893","Landen K Pugh","502 HONEYPOT LANESTANMORE MIDDLESEX","34446",46.0,"M","married","worker",21.0,"NO","YES","YES","YES",967.68,101893,"SAVINGS","landen_pugh@t-online.de","30011111111119","904-555-2332","401-555-5166","MR","401-555-2104"
++"1893","Landen K Pugh","502 HONEYPOT LANESTANMORE MIDDLESEX","34446",46.0,"M","married","worker",21.0,"NO","YES","YES","YES",2616.31,201893,"CHECKING","landen_pugh@t-online.de","30011111111119","904-555-2332","401-555-5166","MR","401-555-2104"
++"1894","Vikki I Okita","ABERCRAVE CAERBONTSWANSEA","29650-4724",43.0,"M","single","executives,self-employed",10.0,"NO","NO","NO","NO",-77716.00,101894,"SAVINGS","okita@blue.com","4024 0071 2159 5481","609-555-8714",NULL,"KP","503-555-6301"
++"1895","Herman N Falk","14B Bradford StreetShifnal","30024-7120",44.0,"F","married","inactive",9.0,"NO","YES","YES","NO",-857.00,101895,"SAVINGS","hfalk@gmx.net","5169-7990-9185-4334","850-555-3215",NULL,"TZ","falk@msl.org"
++"1896","Stafford S Kerr","3-5 swallow placwMayfair 3-5 swallow placwMayfair London","75391",35.0,"M","divorced","worker",6.0,"NO","YES","NO","NO",1615.41,101896,"SAVINGS","stafford_k@cicn.gov","213130951856200","850-555-8725","916-555-6026","ZA","stafford_k@aol.com"
++"1896","Stafford S Kerr","3-5 swallow placwMayfair 3-5 swallow placwMayfair London","75391",35.0,"M","divorced","worker",6.0,"NO","YES","NO","NO",4367.59,201896,"CHECKING","stafford_k@cicn.gov","213130951856200","850-555-8725","916-555-6026","ZA","stafford_k@aol.com"
++"1897","Bobbie J Loseke","27 Sandy LaneBirmingham","59911-6536",29.0,"M","single","worker",9.0,"NO","YES","NO","YES",8166.00,101897,"SAVINGS","bobbie_l@gmail.com","5462522444922689","614-555-4654","512-555-5420","HM","334-555-8923"
++"1898","Quincy L Garrigues","9300 NORMANDY BLVD BLDG 4JACKSONVILLE","32221-5522",73.0,"F","widowed","pensioner",20.0,"NO","NO","NO","NO",10496.00,101898,"SAVINGS","garrigues@cicn.gov","5423111111111111","225-555-5023",NULL,"AL","417-555-2304"
++"1899","Leigh L Collis","White Horse Business ParkTrowbridge","29063-9071",77.0,"M","married","pensioner",26.0,"NO","NO","NO","NO",2209.68,101899,"CHECKING","lcollis@ccdef.net","30510011111111","512-555-9659",NULL,"AQ","907-555-4455"
++"1899","Leigh L Collis","White Horse Business ParkTrowbridge","29063-9071",77.0,"M","married","pensioner",26.0,"NO","NO","NO","NO",5974.32,201899,"SAVINGS","lcollis@ccdef.net","30510011111111","512-555-9659",NULL,"AQ","907-555-4455"
++"1900","Ray V Cribbs","King Norton P O BOX 660321 Birmingham","32609-5626",40.0,"M","divorced","worker",6.0,"NO","NO","NO","YES",160434.00,101900,"CHECKING","ray.cribbs@de.ibm.com","4146-6643-9004-5458","907-555-1991","515-555-1231","TF","401-555-1061"
++"1901","Michael B Grossnickle","ALTHORP ROAD P O BOX 660321 LONDON","77304-3304",45.0,"M","married","executives,self-employed",19.0,"NO","NO","YES","YES",2185.00,101901,"CHECKING","mgrossnickle@de.ibm.com","4024007121595481","302-555-4068","651-555-4760","GW","518-555-5326"
++"1902","Ming Y Gibson","CROMWELL ROADCAMBS.","29169-4763",34.0,"F","single","employee",5.0,"NO","NO","NO","YES",1891.89,101902,"CHECKING","ming_g@web.de","5285696282092972","617-555-8956",NULL,"MU","334-555-8888"
++"1902","Ming Y Gibson","CROMWELL ROADCAMBS.","29169-4763",34.0,"F","single","employee",5.0,"NO","NO","NO","YES",5115.11,201902,"SAVINGS","ming_g@web.de","5285696282092972","617-555-8956",NULL,"MU","334-555-8888"
++"1903","Armond W Solerg","DARTMOUTH ROADWEST MIDLANDS","85706",61.0,"F","married","inactive",31.0,"NO","NO","YES","NO",142347.00,101903,"CHECKING","solerg@de.ibm.com","5169 7990 9185 4334","401-555-1267","401-555-2795","MQ","602-555-3463"
++"1904","Leonard W Cherella","3-5 SWALLOW PLACELondon","67501",75.0,"F","married","pensioner",27.0,"YES","NO","YES","NO",18529.00,101904,"CHECKING","leonardcherella@blue.com","5285696282092972","402-555-8627","225-555-9326","IT","402-555-133"
++"1905","Lorie S Davis","Church RoadBristol","33920-3837",84.0,"M","married","pensioner",27.0,"YES","NO","YES","NO",5002.83,101905,"CHECKING","lorie_d@cicn.gov","3400 000000 00009","307-555-2746",NULL,"SN","lorie_davis@web.de"
++"1905","Lorie S Davis","Church RoadBristol","33920-3837",84.0,"M","married","pensioner",27.0,"YES","NO","YES","NO",13526.17,201905,"SAVINGS","lorie_d@cicn.gov","3400 000000 00009","307-555-2746",NULL,"SN","lorie_davis@web.de"
++"1906","Cleatus T Ratajczak","183 Great Howard StreetLiverpool","77429",40.0,"F","married","intermediate professions",3.0,"NO","NO","YES","NO",2407.00,101906,"CHECKING","cratajczak@ibm.com","5169799091854334","850-555-4948",NULL,"UZ","515-555-3421"
++"1907","Ann N Wiles","Springlakes Industrial EstateAldershot","85745",33.0,"M","single","intermediate professions",13.0,"NO","YES","NO","YES",-1366.00,101907,"CHECKING",NULL,"180030951856201","503-555-5460",NULL,"VU","ann_w@aol.com"
++"1908","Loyd E Devlin","Wood Lane EndHemel Hempstead","86403-9368",46.0,"F","single","inactive",14.0,"YES","NO","NO","YES",180.63,101908,"CHECKING","loyddevlin@gmail.com","340000000000009","904-555-106","302-555-6563","MA","401-555-4405"
++"1908","Loyd E Devlin","Wood Lane EndHemel Hempstead","86403-9368",46.0,"F","single","inactive",14.0,"YES","NO","NO","YES",488.37,201908,"SAVINGS","loyddevlin@gmail.com","340000000000009","904-555-106","302-555-6563","MA","401-555-4405"
++"1909","Bee U Mitchell","120 VYSE STREETBIRMINGHAM","63043",30.0,"M","single","employee",11.0,"YES","NO","NO","YES",4265.00,101909,"SAVINGS","bee.mitchell@aol.com","4146664390045458","615-555-2379",NULL,"MK","bee.mitchell@t-online.de"
++"1910","Roth B Dismukes","Great YarmouthNorfolk","29407",23.0,"F","single","inactive",4.0,"NO","YES","NO","YES",-233.00,101910,"SAVINGS","roth.dismukes@aol.com","4146664390045458","615-555-8744","802-555-8004","AG","775-555-4373"
++"1911","Henning P Olson","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27613-1103",21.0,"F","single","inactive",3.0,"NO","NO","NO","YES",718.47,101911,"SAVINGS","henning_o@aol.com","30510011111111","402-555-9830",NULL,"SO","501-555-8264"
++"1911","Henning P Olson","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27613-1103",21.0,"F","single","inactive",3.0,"NO","NO","NO","YES",1942.53,201911,"CHECKING","henning_o@aol.com","30510011111111","402-555-9830",NULL,"SO","501-555-8264"
++"1912","Cathrine G Subramaniam","35 Livery StreetBirmingham","46038",28.0,"F","single","inactive",17.0,"YES","NO","NO","YES",729.00,101912,"SAVINGS","subramaniam@cicn.gov","5169 7990 9185 4334","385-555-7570",NULL,"CH","850-555-132"
++"1913","Sally D Fagan","Breakspear WayHemel Hempstead","85215-1111",22.0,"M","single","inactive",11.0,"NO","NO","NO","YES",2192.00,101913,"SAVINGS","sfagan@gmx.net","5169799091854334","202-555-1870","916-555-2217","CO","843-555-2050"
++"1914","Doyle I Davidson","Trafalgar WayCamberley","10011",71.0,"M","married","pensioner",11.0,"NO","NO","NO","YES",25615.71,101914,"SAVINGS","doyle_d@aol.com","3528-3095-1856-2063","775-555-4616",NULL,"CX",NULL
++"1914","Doyle I Davidson","Trafalgar WayCamberley","10011",71.0,"M","married","pensioner",11.0,"NO","NO","NO","YES",69257.29,201914,"CHECKING","doyle_d@aol.com","3528-3095-1856-2063","775-555-4616",NULL,"CX",NULL
++"1915","Nboc L Erwin","Park Farm Industrial EstateWellingborough","34761",23.0,"F","single","inactive",4.0,"NO","NO","NO","YES",7991.00,101915,"SAVINGS","nboc.erwin@cicn.gov","5580977968891503","850-555-4072",NULL,"HN","302-555-4216"
++"1916","Henning S Willey","LympheHythe","78666",74.0,"F","married","pensioner",22.0,"YES","NO","YES","YES",1263.00,101916,"SAVINGS","willey@aol.com","4024-0071-2159-5481","502-555-7834","515-555-3800","IS","henning_willey@icloud.com"
++"1917","Cathi S Baner","UrmstonManchester","77535-3014",45.0,"M","married","worker",23.0,"NO","NO","YES","YES",1258.47,101917,"SAVINGS","baner@cicn.gov","5169-7990-9185-4334","417-555-5700","505-555-9175","GH","803-555-4495"
++"1917","Cathi S Baner","UrmstonManchester","77535-3014",45.0,"M","married","worker",23.0,"NO","NO","YES","YES",3402.52,201917,"CHECKING","baner@cicn.gov","5169-7990-9185-4334","417-555-5700","505-555-9175","GH","803-555-4495"
++"1918","Lona V Hutts","Dinwall RoadCroydon","77388",44.0,"M","married","intermediate professions",15.0,"NO","NO","YES","NO",3199.00,101918,"SAVINGS","hutts@gmx.net","38111111111119","515-555-4756",NULL,"KR","785-555-6344"
++"1919","Shaye M Kardell","Small HeathBirmingham","86406-7746",50.0,"M","married","farmer",27.0,"NO","NO","NO","YES",18134.00,101919,"SAVINGS","kardell@de.ibm.com","6220264390045758","385-555-3017","609-555-2609","GY","907-555-5742"
++"1920","Aldona T Sonido","GolborneWarrington","33409-5274",23.0,"F","single","inactive",5.0,"NO","YES","NO","YES",59.94,101920,"SAVINGS","aldona.sonido@icloud.com","378282246310005","517-555-8650",NULL,"IN","850-555-2694"
++"1920","Aldona T Sonido","GolborneWarrington","33409-5274",23.0,"F","single","inactive",5.0,"NO","YES","NO","YES",162.06,201920,"CHECKING","aldona.sonido@icloud.com","378282246310005","517-555-8650",NULL,"IN","850-555-2694"
++"1921","Michelle I Duzenack","ASTON HALL RoadBIRMINGHAM","28270",81.0,"F","widowed","pensioner",3.0,"NO","NO","NO","YES",8585.00,101921,"CHECKING","michelle_duzenack@msn.com","5423111111111111","402-555-3653",NULL,"IN","803-555-5560"
++"1922","Sheldon J Peter","SMETHWICK WARLEYWEST MIDLANDS","30308-2101",57.0,"F","married","employee",20.0,"NO","NO","NO","NO",8232.00,101922,"CHECKING","sheldon_p@msl.org","30411111111111","307-555-8834","505-555-2924","CO",NULL
++"1923","Kyong-Ok Y Lord","Round SpinneyNorthampton","28247",40.0,"M","married","worker",18.0,"NO","NO","YES","YES",1166.13,101923,"CHECKING","kyong-ok_lord@t-online.de","3528309518562063","404-555-4752","202-555-5436","GT","515-555-9061"
++"1923","Kyong-Ok Y Lord","Round SpinneyNorthampton","28247",40.0,"M","married","worker",18.0,"NO","NO","YES","YES",3152.87,201923,"SAVINGS","kyong-ok_lord@t-online.de","3528309518562063","404-555-4752","202-555-5436","GT","515-555-9061"
++"1924","Joann L Adachi","10 Mordaunt RoadLondon","13214-1852",31.0,"M","cohabitant","employee",1.0,"NO","YES","NO","YES",-21.00,101924,"CHECKING","joann_adachi@aol.com","30411111111111","417-555-2980",NULL,"PA","jadachi@icloud.com"
++"1925","Bev B Pelaez","Breakspear WayHemel Hempstead","30152-3883",35.0,"F","married","employee",14.0,"NO","YES","YES","NO",16677.00,101925,"CHECKING","pelaez@ccdef.net","180030951856201","334-555-9054",NULL,"MU",NULL
++"1926","Gunther I Laninga","Winyates GreenRedditch","80216",30.0,"M","single","worker",10.0,"NO","YES","NO","NO",552.96,101926,"CHECKING","laninga@aol.com","5462522444922689","401-555-4318","303-555-2960","MS","334-555-8299"
++"1926","Gunther I Laninga","Winyates GreenRedditch","80216",30.0,"M","single","worker",10.0,"NO","YES","NO","NO",1495.04,201926,"SAVINGS","laninga@aol.com","5462522444922689","401-555-4318","303-555-2960","MS","334-555-8299"
++"1927","Nathaniel I Campoy","West GateLondon","33155-4637",64.0,"F","widowed","pensioner",19.0,"NO","NO","NO","YES",24376.00,101927,"SAVINGS","campoy@gmail.com","4146664390045458","515-555-7934","502-555-3977","CV","ncampoy@gmx.net"
++"1928","Ralston M Kortepeter","10 MORDAUNT ROADLONDON","85277-1028",57.0,"M","single","worker",9.0,"NO","NO","NO","YES",21414.00,101928,"SAVINGS","kortepeter@cicn.gov","5520111111111121","503-555-800","907-555-4655","PF","406-555-824"
++"1929","Jerald O Thurman","10 MORDAUNT ROADLONDON","32086",53.0,"M","married","craftsmen, storekeepers",17.0,"YES","NO","YES","NO",-654.48,101929,"SAVINGS",NULL,"4146 6643 9004 5458","808-555-7447",NULL,"SJ","jerald_thurman@aol.com"
++"1929","Jerald O Thurman","10 MORDAUNT ROADLONDON","32086",53.0,"M","married","craftsmen, storekeepers",17.0,"YES","NO","YES","NO",-1769.52,201929,"CHECKING",NULL,"4146 6643 9004 5458","808-555-7447",NULL,"SJ","jerald_thurman@aol.com"
++"1930","Elliot F Young","Boyatt WoodEastleigh","32773",74.0,"F","widowed","inactive",0.0,"NO","NO","NO","YES",24911.00,101930,"SAVINGS","eyoung@t-online.de","5580977968891503","503-555-6513",NULL,"IE","603-555-4890"
++"1931","Brantley G Staton","Willen LakeMilton Keynes","33845",73.0,"F","divorced","pensioner",1.0,"NO","NO","NO","NO",1303.00,101931,"SAVINGS","brantleystaton@t-online.de","5462522444922689","701-555-1567",NULL,"TJ","615-555-772"
++"1932","Emelia X Read","148 Edmund StreetBirmingham","77255-5649",57.0,"F","married","worker",19.0,"NO","NO","YES","NO",9569.88,101932,"SAVINGS","emelia_read@ibm.com","5423111111111111","609-555-6517",NULL,"FX","785-555-1054"
++"1932","Emelia X Read","148 Edmund StreetBirmingham","77255-5649",57.0,"F","married","worker",19.0,"NO","NO","YES","NO",25874.12,201932,"CHECKING","emelia_read@ibm.com","5423111111111111","609-555-6517",NULL,"FX","785-555-1054"
++"1933","Marian E Grelle","Trafalgar WayCamberley","99163-0306",49.0,"M","married","farmer",25.0,"YES","NO","NO","YES",39003.00,101933,"SAVINGS","mgrelle@ibm.com","6011567891012132","402-555-4502",NULL,"MD","marian_g@yahoo.com"
++"1934","Rosalie I Perillo","HatfieldHertfordshire","77080-8111",76.0,"F","widowed","pensioner",10.0,"NO","YES","NO","YES",32729.00,101934,"SAVINGS","rosalie_perillo@gmail.com","30011111111119","515-555-2600","505-555-5425","CF","307-555-2660"
++"1935","Mel R Polsky","Small HeathBirmingham","29577-6688",82.0,"F","married","pensioner",1.0,"NO","NO","YES","NO",7488.72,101935,"SAVINGS","mel_p@gmx.net","30310111161029","334-555-562",NULL,"TV","601-555-2648"
++"1935","Mel R Polsky","Small HeathBirmingham","29577-6688",82.0,"F","married","pensioner",1.0,"NO","NO","YES","NO",20247.28,201935,"CHECKING","mel_p@gmx.net","30310111161029","334-555-562",NULL,"TV","601-555-2648"
++"1936","Kellie P Hans","WimborneDorset","34471",72.0,"M","married","pensioner",12.0,"NO","NO","YES","NO",75368.00,101936,"SAVINGS","kellie_h@gmx.net","30510011111111","808-555-9693",NULL,"MO","401-555-4063"
++"1937","Mila B Cain","LOWER MIDDLETON STREETDERBYSHIRE","77375",59.0,"M","married","worker",19.0,"YES","NO","YES","NO",8991.00,101937,"SAVINGS","mila_cain@msl.org",NULL,"614-555-4014","651-555-785","HT","417-555-3100"
++"1938","Tyrell T Biltucci","----------Worthing","86325-0999",52.0,"M","single","worker",16.0,"NO","NO","NO","YES",3316.95,101938,"SAVINGS","tbiltucci@cicn.gov","6011567891012132","317-555-1748","617-555-8293","VI","202-555-3068"
++"1938","Tyrell T Biltucci","----------Worthing","86325-0999",52.0,"M","single","worker",16.0,"NO","NO","NO","YES",8968.05,201938,"CHECKING","tbiltucci@cicn.gov","6011567891012132","317-555-1748","617-555-8293","VI","202-555-3068"
++"1939","Chas Y Putz","Lymphe Lymphe Hythe","86406-7533",23.0,"F","single","employee",0.0,"NO","YES","NO","YES",75.00,101939,"SAVINGS","chas_p@de.ibm.com","5169 7990 9185 4334","515-555-6514","360-555-3130","MX",NULL
++"1940","Kenneth O Wills","60 FREDERICK STREETBIRMINGHAM","32333",81.0,"F","widowed","pensioner",13.0,"NO","NO","NO","NO",25606.00,101940,"SAVINGS","kenneth_w@de.ibm.com","5383908528354962","503-555-8593","785-555-538","KE",NULL
++"1941","Jairo M Alva","BEDLINGTONNORTHUMBERLAND","77429-7078",72.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",3687.39,101941,"SAVINGS","alva@aol.com","5580977968891503","417-555-9197",NULL,"IS","334-555-3149"
++"1941","Jairo M Alva","BEDLINGTONNORTHUMBERLAND","77429-7078",72.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",9969.61,201941,"CHECKING","alva@aol.com","5580977968891503","417-555-9197",NULL,"IS","334-555-3149"
++"1942","Mohammed V Dieson","WittonBirmingham","33316-4109",71.0,"M","married","inactive",21.0,"YES","NO","YES","NO",28431.00,101942,"SAVINGS","mdieson@ibm.com","6520224090045455","505-555-4586","904-555-367","MK","802-555-3024"
++"1943","Fergus N Gray","DARNALL ROADSHEFFIELD","80014",67.0,"F","married","craftsmen, storekeepers",8.0,"NO","NO","YES","YES",160193.00,101943,"SAVINGS","fgray@web.de","36111111111111","406-555-8339",NULL,"CA","fergusgray@aol.com"
++"1944","Ferrell B Garrett","Berkswell RoadMeriden","78408",74.0,"M","married","pensioner",31.0,"YES","NO","YES","YES",8044.92,101944,"CHECKING","ferrell_g@msl.org","30510011111111","617-555-2208",NULL,"SM","334-555-4449"
++"1944","Ferrell B Garrett","Berkswell RoadMeriden","78408",74.0,"M","married","pensioner",31.0,"YES","NO","YES","YES",21751.07,201944,"SAVINGS","ferrell_g@msl.org","30510011111111","617-555-2208",NULL,"SM","334-555-4449"
++"1945","Vashti E Dobson","Breakspear WayHemel Hempstead","33139-3031",69.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",208912.00,101945,"CHECKING","vdobson@cicn.gov","340000000000009","803-555-4490","405-555-1959","AL","605-555-1796"
++"1946","Zana O Corona","ROUGH HEY ROADS ROUGH HEY ROADS LANCS.","33908",76.0,"F","married","inactive",35.0,"NO","NO","YES","NO",47479.00,101946,"CHECKING","zana_corona@icloud.com","5520111111111121","405-555-1097",NULL,"LA","405-555-2358"
++"1947","Debbie Q Dupier","RIVERSIDE WAY RIVERSIDE WAY CAMBERLEY SURREY GU","85299",23.0,"M","single","inactive",11.0,"NO","NO","NO","YES",2437.02,101947,"CHECKING","debbie_dupier@msl.org","38111111111119","617-555-5157","602-555-2995","PL","803-555-9376"
++"1947","Debbie Q Dupier","RIVERSIDE WAY RIVERSIDE WAY CAMBERLEY SURREY GU","85299",23.0,"M","single","inactive",11.0,"NO","NO","NO","YES",6588.98,201947,"SAVINGS","debbie_dupier@msl.org","38111111111119","617-555-5157","602-555-2995","PL","803-555-9376"
++"1948","Fulton Q Marx","----------WARWICK","34292",32.0,"M","single","worker",12.0,"NO","YES","NO","YES",13509.00,101948,"CHECKING",NULL,"4024-0071-2159-5481","808-555-1439","317-555-7266","LC","605-555-9650"
++"1949","Cheri I Griego","6/8 HIGH STREETCHESHIRE","99515-2051",46.0,"F","single","employee",2.0,"NO","NO","NO","YES",4953.00,101949,"CHECKING","cheri_griego@msl.org","6011567891012132","505-555-7297",NULL,"RE","cheri.griego@ccdef.net"
++"1950","Shelley H Saarey","3322 PALMTREE DRLAKE HAVASU CITY AZ","86404-1623",32.0,"M","married","worker",6.0,"YES","NO","YES","YES",-92.88,101950,"SAVINGS","shelleysaarey@web.de","4146-6643-9004-5458","208-555-6851",NULL,"BW","385-555-157"
++"1950","Shelley H Saarey","3322 PALMTREE DRLAKE HAVASU CITY AZ","86404-1623",32.0,"M","married","worker",6.0,"YES","NO","YES","YES",-251.12,201950,"CHECKING","shelleysaarey@web.de","4146-6643-9004-5458","208-555-6851",NULL,"BW","385-555-157"
++"1951","Jennifer Q Nettles","14216 HWY 90 WDEFUNIAK SPRINGS FL","32433",71.0,"M","widowed","pensioner",26.0,"NO","NO","NO","NO",48050.00,101951,"SAVINGS","jennifer_n@web.de","3530 1113 3330 0000","302-555-4412",NULL,"BW","503-555-2627"
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties
+new file mode 100755
+index 0000000..2bf7347
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties
+@@ -0,0 +1,17 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# This file contains a list of the ODF sample data files
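++# Each entry key is a sample data file name; the values are left empty.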
++simple-example-table.csv=
++simple-example-document.txt=
++bank-clients-short.csv=
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt
+new file mode 100755
+index 0000000..6bdeca2
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt
+@@ -0,0 +1 @@
++This is a simple example text.
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv
+new file mode 100755
+index 0000000..adbd1ab
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv
+@@ -0,0 +1,4 @@
++OMColumnName1,OMColumnName2
++aaaa,1
++bbbb,2
++cccc,3
+diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt
+new file mode 100755
+index 0000000..48d6e85
+--- /dev/null
++++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt
+@@ -0,0 +1 @@
++1.2.0-SNAPSHOT
+\ No newline at end of file
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java
+new file mode 100755
+index 0000000..587ae30
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java
+@@ -0,0 +1,136 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest;
++
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++import java.util.Set;
++import java.util.logging.Level;
++
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.BeforeClass;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
++import org.apache.atlas.odf.core.configuration.ConfigManager;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1;
++
++public class ODFAPITestWithMetadataStoreBase extends ODFTestBase {
++
++	@Before
++	public void createSampleData() throws Exception {
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		mds.resetAllData();
++		mds.createSampleData();
++	}
++
++	@BeforeClass
++	public static void registerServices() throws Exception {
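++		// load the initial test configuration (which registers the test discovery services) from the classpath before any test runs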
++		ConfigContainer config = JSONUtils.readJSONObjectFromFileInClasspath(ConfigContainer.class, "org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json",
++				ODFAPITestWithMetadataStoreBase.class.getClassLoader());
++		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
++		configManager.updateConfigContainer(config);
++	}
++
++	protected List<MetaDataObjectReference> getTables(MetadataStore mds) {
++		List<MetaDataObjectReference> dataSets = mds.search(mds.newQueryBuilder().objectType("DataFile").build());
++		Assert.assertTrue(dataSets.size() > 0);
++		// use at most MAX_DATASETS data sets
++		int MAX_DATASETS = 5;
++		if (dataSets.size() > MAX_DATASETS) {
++			dataSets = dataSets.subList(0, MAX_DATASETS);
++		}
++		return dataSets;
++	}
++
++	public String test(String dsId, List<MetaDataObjectReference> dataSets, AnalysisRequestStatus.State expectedFinalState, boolean requestIsInvalid, String correlationId) throws Exception {
++		log.log(Level.INFO, "Testing ODF with metadata store. Discovery service Id: {0}, dataSets: {1}, expected state: {2}, correlationId: {3}, should request be invalid: {4}", new Object[] { dsId,
++				dataSets, expectedFinalState, correlationId, requestIsInvalid });
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		Assert.assertTrue(dataSets.size() > 0);
++
++		Assert.assertNotNull(mds);
++		AnalysisRequest request = new AnalysisRequest();
++		request.setDiscoveryServiceSequence(Collections.singletonList(dsId));
++		request.setDataSets(dataSets);
++		Map<String, Object> additionalProps = new HashMap<String, Object>();
++		additionalProps.put(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID, correlationId);
++		request.setAdditionalProperties(additionalProps);
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		AnalysisResponse resp = analysisManager.runAnalysis(request);
++
++		log.info("Analysis started on data sets: " + dataSets + ", response: " + JSONUtils.toJSON(resp));
++		log.info("Response message: " + resp.getDetails());
++		if (requestIsInvalid) {
++			Assert.assertTrue(resp.isInvalidRequest());
++			return null;
++		}
++
++		Assert.assertFalse(resp.isInvalidRequest());
++		String id = resp.getId();
++		AnalysisRequestStatus status = null;
++		int maxPolls = 100;
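++		// poll once per second until the request leaves the ACTIVE/QUEUED states or the poll budget is used up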
++		do {
++			status = analysisManager.getAnalysisRequestStatus(id);
++			log.log(Level.INFO, "Poll request for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
++					expectedFinalState });
++			maxPolls--;
++			Thread.sleep(1000);
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED));
++		log.log(Level.INFO, "Expected state: {0}, actual state: {1}", new Object[] { expectedFinalState, status.getState() });
++		Assert.assertEquals(expectedFinalState, status.getState());
++		return resp.getId();
++	}
++
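++	// Verify that the "most recent annotations" view never repeats an annotation type and only contains annotations that were actually retrieved for the object.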
++	public void checkMostRecentAnnotations(MetadataStore mds, AnnotationStore as, MetaDataObjectReference ref) {
++		Map<MetaDataObjectReference, MetaDataObject> ref2Retrieved = new HashMap<>();
++		for (Annotation annot : as.getAnnotations(ref, null)) {
++			ref2Retrieved.put(annot.getReference(), annot);
++		}
++
++		List<Annotation> mostRecentAnnotations = AnnotationStoreUtils.getMostRecentAnnotationsByType(as, ref);
++		Assert.assertNotNull(mostRecentAnnotations);
++		Assert.assertTrue(mostRecentAnnotations.size() <= ref2Retrieved.size());
++		Set<MetaDataObjectReference> mostRecentAnnotationRefs = new HashSet<>();
++		Set<String> annotationTypes = new HashSet<>();
++		for (Annotation annot : mostRecentAnnotations) {
++			// every annotation type occurs at most once
++			Assert.assertFalse(annotationTypes.contains(annot.getAnnotationType()));
++			mostRecentAnnotationRefs.add(annot.getReference());
++			annotationTypes.add(annot.getAnnotationType());
++		}
++
++		// all most recent annotations are a subset of all annotations
++		Assert.assertTrue(ref2Retrieved.keySet().containsAll(mostRecentAnnotationRefs));
++
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java
+new file mode 100755
+index 0000000..f0742aa
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java
+@@ -0,0 +1,74 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest;
++
++import java.util.ArrayList;
++import java.util.List;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.MyObject;
++import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.MyOtherObject;
++import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.SyncDiscoveryServiceAnnotation;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ODFAPITestWithMetadataStoreExtendedAnnotations extends ODFAPITestWithMetadataStoreBase {
++
++	@Test
++	public void testSuccessSyncExtendedAnnotations() throws Exception {
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++		List<MetaDataObjectReference> dataSets = getTables(mds);
++		String dsID = "synctestservice-with-extendedannotations";
++
++		String requestId = test(dsID, dataSets, State.FINISHED, false, null);
++
++		log.info("Checking if extended annotations exist for request ID: " + requestId);
++		for (MetaDataObjectReference dataSet : dataSets) {
++			List<SyncDiscoveryServiceAnnotation> annotations = new ArrayList<>();
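++			// collect the extended annotations created by this request; two are expected per data set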
++			List<Annotation> annots = as.getAnnotations(dataSet, null);
++			Assert.assertTrue(annots.size() >= 2);
++			
++			for (Annotation annot : annots) {		
++				Assert.assertNotNull(annot);
++				if (annot.getAnalysisRun().equals(requestId)) {
++					log.info("Found annotation: " + annot + ", json: " + JSONUtils.toJSON(annot));
++					Assert.assertNotNull(annot);
++					Assert.assertEquals(SyncDiscoveryServiceAnnotation.class, annot.getClass());
++					SyncDiscoveryServiceAnnotation extAnnot = (SyncDiscoveryServiceAnnotation) annot;
++					Assert.assertNotNull(extAnnot.getProp1());
++					Assert.assertEquals(extAnnot.getProp1().hashCode(), extAnnot.getProp2());
++					MyObject mo = extAnnot.getProp3();
++					Assert.assertNotNull(mo);
++					Assert.assertEquals("nested" + extAnnot.getProp1(), mo.getAnotherProp());
++					
++					MyOtherObject moo = mo.getYetAnotherProp();
++					Assert.assertNotNull(moo);
++					Assert.assertEquals("nestedtwolevels" + extAnnot.getProp1(), moo.getMyOtherObjectProperty());
++					annotations.add(extAnnot);
++				}
++			}
++			Assert.assertEquals(2, annotations.size());
++			// TODO check annotations list
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java
+new file mode 100755
+index 0000000..e47b316
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java
+@@ -0,0 +1,69 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest;
++
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ODFAPITestWithMetadataStoreJsonAnnotation extends ODFAPITestWithMetadataStoreBase {
++
++	Logger logger = ODFTestLogger.get();
++
++	String expectedJson = Utils.getInputStreamAsString(this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json"), "UTF-8");
++
++	@Test
++	public void testSuccessSyncJsonAnnotations() throws Exception {
++
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++		List<MetaDataObjectReference> dataSets = getTables(mds);
++		String dsID = "synctestservice-with-json-annotations";
++
++		String requestId = test(dsID, dataSets, State.FINISHED, false, null);
++
++		log.info("Checking if annotations exist for request ID: " + requestId);
++		int numMatchingAnnotations = 0;
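++		// count annotations created by this run across all data sets; the per-data-set check below is currently disabled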
++		for (MetaDataObjectReference dataSet : dataSets) {
++			List<Annotation> annotationRefs = as.getAnnotations(dataSet, null);
++			Assert.assertTrue(annotationRefs.size() >= 1);
++			for (Annotation annot : annotationRefs) {
++				Assert.assertNotNull(annot);
++				if (annot.getAnalysisRun().equals(requestId)) {
++					log.info("Found annotation: " + annot + ", json: " + JSONUtils.toJSON(annot));
++					Assert.assertNotNull(annot);
++					String jsonProperties = annot.getJsonProperties();
++					Assert.assertNotNull(jsonProperties);
++					logger.info("Actual annotation string: " + jsonProperties + ". Expected json: " + expectedJson);
++					Assert.assertEquals(expectedJson, jsonProperties);
++					numMatchingAnnotations++;
++				}
++			}
++//			Assert.assertEquals(1, numMatchingAnnotations);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java
+new file mode 100755
+index 0000000..6b7c9b9
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java
+@@ -0,0 +1,134 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.UUID;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++
++public class ODFAPITestWithMetadataStoreSimple extends ODFAPITestWithMetadataStoreBase {
++
++	public ODFAPITestWithMetadataStoreSimple() {
++		ODFTestBase.log.info("Classpath: " + System.getProperty("java.class.path"));
++	}
++
++	@Test
++	public void testSuccessASync() throws Exception {
++		testSuccess("asynctestservice-with-annotations");
++	}
++
++	@Test
++	public void testSuccessSync() throws Exception {
++		testSuccess("synctestservice-with-annotations");
++	}
++
++	void testSuccess(String dsId) throws Exception {
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++		List<MetaDataObjectReference> dataSets = getTables(mds);
++
++		String correlationId = UUID.randomUUID().toString();
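++		// the correlation ID is passed as a request property and is expected to appear in the JSON properties of the created annotations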
++		
++		String requestId = test(dsId, dataSets, AnalysisRequestStatus.State.FINISHED, false, correlationId);
++		Thread.sleep(3000); // give time for notifications to arrive
++
++		List<MetaDataObjectReference> annotationsOfThisRun = new ArrayList<>();
++		
++		ODFTestBase.log.info("Checking if annotations exist");
++		for (MetaDataObjectReference dataSet : dataSets) {
++			List<Annotation> retrievedAnnotations = as.getAnnotations(dataSet, null);
++			Assert.assertTrue(retrievedAnnotations.size() > 0);
++			List<Annotation> annotations = new ArrayList<>();
++			for (Annotation annot : retrievedAnnotations) {
++				Assert.assertNotNull(annot);
++				Assert.assertNotNull(annot.getAnalysisRun());
++				if (annot.getAnalysisRun().equals(requestId)) {
++					annotationsOfThisRun.add(annot.getReference());
++					Assert.assertNotNull(annot.getJsonProperties());
++					JSONObject props = new JSONObject(annot.getJsonProperties());
++					String annotCorrId = (String) props.get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID);
++					if (annotCorrId != null) {
++						Assert.assertNotNull(annot.getAnnotationType());
++					}
++					annotations.add(annot);
++				}
++			}
++			ODFTestBase.log.info("Checking that annotation notifications were received");
++			// check that we got notified of all annotations
++			
++			// assume at least that those new annotations were created
++			Assert.assertTrue(TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations() <= annotations.size());
++			int found = 0;
++			for (int i = 0; i < TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations(); i++) {
++				String[] annotValues = TestSyncDiscoveryServiceWritingAnnotations1.getPropsOfNthAnnotation(i);
++				for (Annotation annotation : annotations) {
++					if (annotation.getAnnotationType() != null) {
++						if (annotation.getAnnotationType().equals(annotValues[0])) {
++							JSONObject jo = new JSONObject(annotation.getJsonProperties());
++							String foundCorrelationId = (String) jo.get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID);
++							// only look at those where the correlation ID property is set
++							if (correlationId.equals(foundCorrelationId)) {
++								String val = (String) jo.get(annotValues[1]);
++								Assert.assertEquals(annotValues[2], val);
++								Assert.assertEquals(requestId, annotation.getAnalysisRun());
++								// annotation types and the JSON properties match
++								found++;
++							}
++						}
++					}
++				}
++			}
++			// assert that we have found all and not more
++			Assert.assertEquals(TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations(), found);
++
++			checkMostRecentAnnotations(mds, as, dataSet);
++		}
++	}
++
++
++	@Test
++	public void testFailureASync() throws Exception {
++		testFailure("asynctestservice-with-annotations");
++	}
++
++	@Test
++	public void testFailureSync() throws Exception {
++		testFailure("synctestservice-with-annotations");
++	}
++
++	void testFailure(String dsId) throws Exception {
++		MetaDataObjectReference invalidRef = new MetaDataObjectReference();
++		invalidRef.setId("error-this-is-hopefully-an-invalid-id");
++		List<MetaDataObjectReference> dataSets = Collections.singletonList(invalidRef);
++		test(dsId, dataSets, AnalysisRequestStatus.State.ERROR, true, UUID.randomUUID().toString());
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java
+new file mode 100755
+index 0000000..af70b5a
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java
+@@ -0,0 +1,92 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.connectivity;
++
++import java.sql.ResultSet;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.connectivity.DataSetRetriever;
++import org.apache.atlas.odf.api.connectivity.DataSetRetrieverImpl;
++import org.apache.atlas.odf.api.connectivity.JDBCRetrievalResult;
++import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.integrationtest.metadata.importer.JDBCMetadataImporterTest;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++
++public class DataSetRetrieverTest extends ODFTestBase {
++
++	static Logger logger = ODFTestLogger.get();
++	
++	static MetadataStore createMetadataStore() throws Exception {
++		return new ODFFactory().create().getMetadataStore();
++	}
++	
++	@BeforeClass
++	public static void setupImport() throws Exception {
++		MetadataStore mds = createMetadataStore();
++		// create sample data only if it has not been created yet
++		mds.createSampleData();
++		JDBCMetadataImporterTest.runTestImport(mds);
++	}
++	
++	@Test
++	public void testDataSetRetrievalJDBC() throws Exception {
++		MetadataStore ams = createMetadataStore();
++		DataSetRetriever retriever = new DataSetRetrieverImpl(ams);
++		List<MetaDataObjectReference> refs = ams.search(ams.newQueryBuilder().objectType("Table").build());
++		Assert.assertTrue(refs.size() > 0);
++		int retrievedDataSets = 0;
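++		// materialize the first retrievable table and cross-check it against the corresponding JDBC result set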
++		for (MetaDataObjectReference ref : refs) {
++			Table table = (Table) ams.retrieve(ref);
++			logger.info("Retrieving table: " + table.getName() + ", " + table.getReference().getUrl());
++			if (retriever.canRetrieveDataSet(table)) {
++				retrievedDataSets++;
++				MaterializedDataSet mds = retriever.retrieveRelationalDataSet(table);
++				Assert.assertNotNull(mds);
++				Assert.assertEquals(table, mds.getTable());
++				int numberOfColumns = ams.getColumns(table).size();
++				Assert.assertEquals(numberOfColumns, mds.getColumns().size());
++				Assert.assertNotNull(mds.getData());
++				Assert.assertTrue(mds.getData().size() > 0);
++				for (List<Object> row : mds.getData()) {
++					Assert.assertEquals(numberOfColumns, row.size());
++				}
++				
++				// now test JDBC method
++				JDBCRetrievalResult jdbcResult = retriever.retrieveTableAsJDBCResultSet(table);
++				ResultSet rs = jdbcResult.getPreparedStatement().executeQuery();
++				Assert.assertEquals(mds.getColumns().size(), rs.getMetaData().getColumnCount());
++				int count = 0;
++				while (rs.next()) {
++					count++;
++				}
++				Assert.assertEquals(mds.getData().size(), count);
++				
++				// only run one test
++				break;
++			}
++		}
++		Assert.assertEquals("Number of retrieved data sets does not meet the expected value. ", 1, retrievedDataSets);
++		
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java
+new file mode 100755
+index 0000000..47d3a3d
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java
+@@ -0,0 +1,303 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.metadata;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Set;
++import java.util.UUID;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.DefaultMetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Connection;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.Database;
++
++public abstract class MetadataStoreTestBase {
++	private Logger logger = Logger.getLogger(MetadataStoreTestBase.class.getName());
++	private static final String analysisRun = UUID.randomUUID().toString();
++
++	protected abstract MetadataStore getMetadataStore();
++
++	public static WritableMetadataStore getWritableMetadataStore() {
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		if (!(mds instanceof WritableMetadataStore)) {
++			String errorText = "The MetadataStore implementation ''{0}'' does not support the WritableMetadataStore interface.";
++			Assert.fail(MessageFormat.format(errorText, mds.getClass()));
++			return null;
++		}
++		return (WritableMetadataStore) mds;
++	}
++
++	public static void createAdditionalTestData(WritableMetadataStore mds) {
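++		// build a small object graph: database1 -> schema1 -> {table1, table2} plus rootFolder -> nestedFolder -> {file1, file2}, and one annotation of each kind on the BankClientsShort sample file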
++		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
++
++		JDBCConnection connection = new JDBCConnection();
++		connection.setName("connection1");
++
++		Table table1 = new Table();
++		table1.setName("table1");
++		Table table2 = new Table();
++		table2.setName("table2");
++
++		Schema schema1 = new Schema();
++		schema1.setName("schema1");
++		MetaDataObjectReference schemaRef = mds.createObject(schema1);
++		mds.addTableReference(schema1, mds.createObject(table1));
++		mds.addTableReference(schema1, mds.createObject(table2));
++
++		Database dataStore = new Database();
++		dataStore.setName("database1");
++		mds.createObject(dataStore);
++		mds.addSchemaReference(dataStore, schemaRef);
++		mds.addConnectionReference(dataStore, mds.createObject(connection));
++
++		DataFile file1 = new DataFile();
++		file1.setName("file1");
++		DataFile file2 = new DataFile();
++		file2.setName("file2");
++
++		DataFileFolder nestedFolder = new DataFileFolder();
++		nestedFolder.setName("nestedFolder");
++		MetaDataObjectReference nestedFolderRef = mds.createObject(nestedFolder);
++		mds.addDataFileReference(nestedFolder, mds.createObject(file1));
++		mds.addDataFileReference(nestedFolder, mds.createObject(file2));
++
++		DataFileFolder rootFolder = new DataFileFolder();
++		rootFolder.setName("rootFolder");
++		mds.createObject(rootFolder);
++		mds.addDataFileFolderReference(rootFolder, nestedFolderRef);
++
++		ProfilingAnnotation pa = new ProfilingAnnotation();
++		pa.setName("A profiling annotation");
++		pa.setProfiledObject(bankClientsShortRef);
++		pa.setAnalysisRun(analysisRun);
++		mds.createObject(pa);
++
++		ClassificationAnnotation ca = new ClassificationAnnotation();
++		ca.setName("A classification annotation");
++		ca.setClassifiedObject(bankClientsShortRef);
++		ca.setAnalysisRun(analysisRun);
++		ca.setClassifyingObjects(Collections.singletonList(bankClientsShortRef));
++		mds.createObject(ca);
++
++		RelationshipAnnotation ra = new RelationshipAnnotation();
++		ra.setName("A relationship annotation");
++		ra.setRelatedObjects(Collections.singletonList(bankClientsShortRef));
++		ra.setAnalysisRun(analysisRun);
++		mds.createObject(ra);
++
++		mds.commit();
++	}
++
++	@Before
++	public void createSampleData() {
++		WritableMetadataStore mds = getWritableMetadataStore();
++		mds.resetAllData();
++		mds.createSampleData();
++		createAdditionalTestData(mds);
++	}
++
++	public static void checkQueryResults(MetadataStore mds, String[] expectedObjectNames, String searchTerm, boolean isSubset) {
++		HashSet<String> expectedResults = new HashSet<String>(Arrays.asList(expectedObjectNames));
++		List<MetaDataObjectReference> searchResult = mds.search(searchTerm);
++		Set<String> foundResults = new HashSet<>();
++		for (MetaDataObjectReference ref : searchResult) {
++			foundResults.add(mds.retrieve(ref).getName());
++		}
++		if (isSubset) {
++			String messageText = "Metadata search term ''{0}'' did not return expected subset of objects. Expected ''{1}'' but received ''{2}''.";
++			Assert.assertTrue(MessageFormat.format(messageText, new Object[] {searchTerm, expectedResults, foundResults}), foundResults.containsAll(expectedResults));
++		} else {
++			String messageText = "Metadata search term ''{0}'' did not return expected results. Expected ''{1}'' but received ''{2}''.";
++			Assert.assertTrue(MessageFormat.format(messageText, new Object[] {searchTerm, expectedResults, foundResults}), foundResults.equals(expectedResults));
++		}
++	}
++
++	public static void checkReferencedObjects(String[] expectedObjectNames, List<? extends MetaDataObject> referencedObjects, boolean isSubset) {
++		HashSet<String> expectedResults = new HashSet<String>(Arrays.asList(expectedObjectNames));
++		Set<String> actualNames = new HashSet<>();
++		for (MetaDataObject obj : referencedObjects) {
++			actualNames.add(obj.getName());
++		}
++		if (isSubset) {
++			String messageText = "Actual object names ''{0}'' are not a subset of expected names ''{1}''.";
++			Assert.assertTrue(MessageFormat.format(messageText, new Object[] { actualNames, expectedResults }), actualNames.containsAll(expectedResults));
++		} else {
++			String messageText = "Actual object names ''{0}'' do not match expected names ''{1}''.";
++			Assert.assertTrue(MessageFormat.format(messageText, new Object[] { actualNames, expectedResults }), actualNames.equals(expectedResults));
++		}
++	}
++
++	void checkFailingQuery(MetadataStore mds, String searchTerm) {
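++		// an invalid query must either return null or throw a MetadataStoreException; any other result fails the test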
++		try {
++			logger.log(Level.INFO, "Checking incorrect query \"{0}\"", searchTerm);
++			List<MetaDataObjectReference> searchResult = mds.search(searchTerm);
++			if (searchResult != null) {
++				// Search must return null or throw exception
++				Assert.fail(MessageFormat.format("Incorrect query \"{0}\" did not throw the expected exception.", searchTerm));
++			}
++		} catch (MetadataStoreException e) {
++			logger.log(Level.INFO, "Catching expected exception.", e);
++		}
++	}
++
++	@Test
++	public void testSearchAndRetrieve() {
++		MetadataStore mds = getMetadataStore();
++		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
++		Assert.assertEquals("The metadata store did not retrieve the object with the expected name.", "BankClientsShort", mds.retrieve(bankClientsShortRef).getName());
++
++		// Test queries with conditions
++		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
++		checkQueryResults(mds, new String[] { "SimpleExampleTable", "file2", "file1"}, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.NOT_EQUALS, "BankClientsShort").build(), false);
++		checkQueryResults(mds, new String[] { "NAME" },
++				mds.newQueryBuilder().objectType("Column").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "NAME").simpleCondition("dataType", MetadataQueryBuilder.COMPARATOR.EQUALS, "string").build(), false);
++
++		// Test type hierarchy
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").build(), true);
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("RelationalDataSet").build(), true);
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable", "Simple URL example document", "Simple local example document", "table1", "table2", "file2", "file1" }, mds.newQueryBuilder().objectType("DataSet").build(), false);
++		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("MetaDataObject").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
++	}
++	
++	public static Database getDatabaseTestObject(MetadataStore mds) {
++		String dataStoreQuery = mds.newQueryBuilder().objectType("DataStore").build();
++		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "database1"}, dataStoreQuery, false);
++		return (Database) mds.retrieve(mds.search(dataStoreQuery).get(0));
++	}
++
++	public static Table getTableTestObject(MetadataStore mds) {
++		String tableQuery = mds.newQueryBuilder().objectType("Table").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "table1").build();
++		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "table1"}, tableQuery, false);
++		return (Table) mds.retrieve(mds.search(tableQuery).get(0));
++	}
++
++	public static DataFile getDataFileTestObject(MetadataStore mds) {
++		String dataFileQuery = mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "SimpleExampleTable").build();
++		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "SimpleExampleTable"}, dataFileQuery, false);
++		return (DataFile) mds.retrieve(mds.search(dataFileQuery).get(0));
++	}
++
++	public static DataFileFolder getDataFileFolderTestObject(MetadataStore mds) {
++		String folderQuery = mds.newQueryBuilder().objectType("DataFileFolder").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "rootFolder").build();
++		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "rootFolder"}, folderQuery, false);
++		return (DataFileFolder) mds.retrieve(mds.search(folderQuery).get(0));
++	}
++
++	public static void checkReferences(MetadataStore mds, Database database) throws Exception {
++		List<Schema> schemaList = mds.getSchemas(database);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "schema1" }, schemaList, false);
++		List<Table> tableList = mds.getTables(schemaList.get(0));
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "table1", "table2" }, tableList, false);
++		List<Connection> connectionList = mds.getConnections(database);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "connection1" }, connectionList, false);
++	}
++
++	public static void checkReferences(MetadataStore mds, Table table) throws Exception {
++		JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) mds.getConnectionInfo(table);
++		Assert.assertTrue("Connection is not set in connection info.", connectionInfo.getConnections().size() > 0);
++		Assert.assertEquals("Connection does not match expected name.", "connection1", connectionInfo.getConnections().get(0).getName());
++		Assert.assertEquals("Schema name of connection info does not match expected value.", "schema1", connectionInfo.getSchemaName());
++	}
++
++	public static void checkReferences(MetadataStore mds, DataFileFolder folder) throws Exception {
++		List<DataFileFolder> nestedFolderList = mds.getDataFileFolders(folder);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "nestedFolder" }, nestedFolderList, false);
++		List<DataFile> fileList = mds.getDataFiles(nestedFolderList.get(0));
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "file1", "file2" }, fileList, false);
++	}
++
++	public static void checkReferences(MetadataStore mds, DataFile file) throws Exception {
++		List<Column> columnList = mds.getColumns(file);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "ColumnName1", "ColumnName2" }, columnList, false);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "SimpleExampleTable" }, Collections.singletonList(mds.getParent(columnList.get(0))), false);
++		MetadataStoreTestBase.checkReferencedObjects(new String[] { "ColumnName1", "ColumnName2" }, mds.getChildren(file), false);
++	}
++
++	@Test
++	public void testReferences() throws Exception {
++		MetadataStore mds = getMetadataStore();
++		checkReferences(mds, getDatabaseTestObject(mds));
++		checkReferences(mds, getTableTestObject(mds));
++		checkReferences(mds, getDataFileFolderTestObject(mds));
++		checkReferences(mds, getDataFileTestObject(mds));
++	}
++
++	@Test
++	public void testErrorHandling() {
++		MetadataStore mds = getMetadataStore();
++		MetaDataObjectReference nonExistentRef = new MetaDataObjectReference();
++		nonExistentRef.setId("non-existing-reference-id");
++		nonExistentRef.setRepositoryId(mds.getRepositoryId());
++
++		Assert.assertEquals("A null value was expected when retrieving a non-existend object.", null, mds.retrieve(nonExistentRef));
++		String errorText = "Metadata search should have returned an empty result set.";
++		Assert.assertEquals(errorText,  mds.search(mds.newQueryBuilder().objectType("nonExistentType").build()), new ArrayList<MetaDataObjectReference>());
++		Assert.assertEquals(errorText,  mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "nonExistentName").build()), new ArrayList<MetaDataObjectReference>());
++
++		if (!mds.getProperties().get(MetadataStore.STORE_PROPERTY_TYPE).equals("atlas")) {
++			// Skipped for Atlas because Atlas accepts a single token as a full-text search
++			checkFailingQuery(mds, "justAsSingleToken");
++			// Skipped for Atlas because Atlas does not report an error for trailing tokens
++			String validQueryWithCondition = mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build();
++			checkFailingQuery(mds, validQueryWithCondition + DefaultMetadataQueryBuilder.SEPARATOR_STRING + "additionalTrailingToken");
++			String validDataSetQuery = mds.newQueryBuilder().objectType("DataFile").build();
++			checkFailingQuery(mds, validDataSetQuery + DefaultMetadataQueryBuilder.SEPARATOR_STRING + "additionalTrailingToken");
++		}
++	}
++
++	@Test
++	public void testAnnotations() {
++		MetadataStore mds = getMetadataStore();
++
++		String annotationQueryString = mds.newQueryBuilder().objectType("Annotation").build();
++		checkQueryResults(mds, new String[] { "A profiling annotation", "A classification annotation", "A relationship annotation" }, annotationQueryString, false);
++		String analysisRunQuery = mds.newQueryBuilder().objectType("Annotation").simpleCondition("analysisRun", MetadataQueryBuilder.COMPARATOR.EQUALS, analysisRun).build();
++		checkQueryResults(mds, new String[] { "A profiling annotation", "A classification annotation", "A relationship annotation" }, analysisRunQuery, false);
++	}
++
++	@Test
++	public void testResetAllData() {
++		MetadataStore mds = getMetadataStore();
++		mds.resetAllData();
++		String emptyResultSet = mds.newQueryBuilder().objectType("MetaDataObject").build();
++		checkQueryResults(mds, new String[] {}, emptyResultSet, false);
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java
+new file mode 100755
+index 0000000..5012ab3
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java
+@@ -0,0 +1,24 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.metadata;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++
++public class WritableMetadataStoreTest extends MetadataStoreTestBase {
++
++	protected MetadataStore getMetadataStore() {
++		return new ODFFactory().create().getMetadataStore();
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java
+new file mode 100755
+index 0000000..1f00a94
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java
+@@ -0,0 +1,214 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.metadata.importer;
++
++import java.sql.Connection;
++import java.sql.DriverManager;
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.HashMap;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.api.metadata.models.Schema;
++import org.apache.atlas.odf.api.metadata.models.Column;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.Table;
++
++public class JDBCMetadataImporterTest extends ODFTestBase {
++	static Logger logger = Logger.getLogger(JDBCMetadataImporterTest.class.getName());
++
++	static boolean testDBRan = false;
++	public static final String SOURCE_DB1 = "DBSAMPLE1";
++	public static final String SOURCE_DB2 = "DBSAMPLE2";
++	public static final String DATABASE1_NAME = SOURCE_DB1;
++	public static final String DATABASE2_NAME = SOURCE_DB2;
++	public static final String SCHEMA1_NAME = "APP1";
++	public static final String SCHEMA2_NAME = "APP2";
++	public static final String TABLE1_NAME = "EMPLOYEE" + System.currentTimeMillis();
++	public static final String TABLE2_NAME = "EMPLOYEE_SHORT" + System.currentTimeMillis();
++
++	@BeforeClass
++	public static void populateTestDB() throws Exception {
++		if (testDBRan) {
++			return;
++		}
++		createTestTables(SOURCE_DB1, SCHEMA1_NAME, TABLE1_NAME, TABLE2_NAME);
++		createTestTables(SOURCE_DB1, SCHEMA2_NAME, TABLE1_NAME, TABLE2_NAME);
++		// Switch the table names so that the table named TABLE2_NAME has more columns in SOURCE_DB2 than it has in SOURCE_DB1
++		createTestTables(SOURCE_DB2, SCHEMA1_NAME, TABLE2_NAME, TABLE1_NAME);
++		testDBRan = true;
++	}
++
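++	// The ";create=true" URL attribute makes Derby create the embedded database under /tmp/odf-derby/<dbName> on first connect.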
++	private static String getConnectionUrl(String dbName) {
++		String dbDir = "/tmp/odf-derby/" + dbName;
++		String connectionURL = "jdbc:derby:" + dbDir + ";create=true";
++		return connectionURL;
++	}
++
++	private static void createTestTables(String dbName, String schemaName, String tableName1, String tableName2) throws Exception {
++		Connection conn = DriverManager.getConnection(getConnectionUrl(dbName));
++
++		String[] stats = new String[] {
++		"CREATE TABLE " + schemaName + "." + tableName1 + " (\r\n" + //
++				"		EMPNO CHAR(6) NOT NULL,\r\n" + //
++				"		FIRSTNME VARCHAR(12) NOT NULL,\r\n" + // 
++				"		MIDINIT CHAR(1),\r\n" + //
++				"		LASTNAME VARCHAR(15) NOT NULL,\r\n" + // 
++				"		WORKDEPT CHAR(3),\r\n" + //
++				"		PHONENO CHAR(4),\r\n" + //
++				"		HIREDATE DATE,\r\n" + //
++				"		JOB CHAR(8),\r\n" + //
++				"		EDLEVEL SMALLINT NOT NULL,\r\n" + // 
++				"		SEX CHAR(1),\r\n" + //
++				"		BIRTHDATE DATE,\r\n" + //
++				"		SALARY DECIMAL(9 , 2),\r\n" + // 
++				"		BONUS DECIMAL(9 , 2),\r\n" + //
++				"		COMM DECIMAL(9 , 2)\r\n" + //
++				"	)",			
++		"INSERT INTO " + schemaName + "." + tableName1 + " VALUES ('000010','CHRISTINE','I','HAAS','A00','3978','1995-01-01','PRES    ',18,'F','1963-08-24',152750.00,1000.00,4220.00)",
++		"INSERT INTO " + schemaName + "." + tableName1 + " VALUES ('000020','MICHAEL','L','THOMPSON','B01','3476','2003-10-10','MANAGER ',18,'M','1978-02-02',94250.00,800.00,3300.00)",
++		// Note that the 2nd table has a subset of the columns of the first table
++		"CREATE TABLE " + schemaName + "." + tableName2 + " (\r\n" + //
++				"		EMPNO CHAR(6) NOT NULL,\r\n" + //
++				"		FIRSTNME VARCHAR(12) NOT NULL,\r\n" + //
++				"		MIDINIT CHAR(1),\r\n" + //
++				"		LASTNAME VARCHAR(15) NOT NULL\r\n" + //
++				"	)",
++		"INSERT INTO " + schemaName + "." + tableName2 + " VALUES ('000010','CHRISTINE','I','HAAS')",
++		"INSERT INTO " + schemaName + "." + tableName2 + " VALUES ('000020','MICHAEL','L','THOMPSON')"
++		};
++
++		for (String stat : stats) {
++			boolean result = conn.createStatement().execute(stat);
++			logger.info("Result of statement: " + result);
++		}
++	}
++
++	private static void runTestImport(MetadataStore mds, String connectionDbName, String importDbName, String schemaName, String tableName) throws Exception {
++		populateTestDB();
++		JDBCMetadataImporter importer = new ODFInternalFactory().create(JDBCMetadataImporter.class);
++		JDBCConnection conn = new JDBCConnection();
++		conn.setJdbcConnectionString(getConnectionUrl(connectionDbName));
++		conn.setUser("dummyUser");
++		conn.setPassword("dummyPassword");
++		JDBCMetadataImportResult importResult = importer.importTables(conn, importDbName, schemaName, tableName);
++		Assert.assertTrue("JDBCMetadataImportResult does not refer to imported database.", importResult.getDatabaseName().equals(importDbName));
++		Assert.assertTrue("JDBCMetadataImportResult does not refer to imported table.", importResult.getTableNames().contains(schemaName + "." + tableName));
++	}
++
++	public static void runTestImport(MetadataStore mds) throws Exception {
++		runTestImport(mds, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE1_NAME);
++	}
++
++	@Test
++	public void testSimpleImport() throws Exception {
++		MetadataStore ams = new ODFFactory().create().getMetadataStore();
++		ams.resetAllData();
++
++		List<String> expectedDatabases = new ArrayList<String>();
++		HashMap<String, List<String>> expectedSchemasForDatabase = new HashMap<String, List<String>>();
++		HashMap<String, List<String>> expectedTablesForSchema = new HashMap<String, List<String>>();
++		HashMap<String, List<String>> expectedColumnsForTable = new HashMap<String, List<String>>();
++
++		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE1_NAME);
++
++		expectedDatabases.add(DATABASE1_NAME);
++		expectedSchemasForDatabase.put(DATABASE1_NAME, new ArrayList<String>());
++		expectedSchemasForDatabase.get(DATABASE1_NAME).add(SCHEMA1_NAME);
++		expectedTablesForSchema.put(SCHEMA1_NAME, new ArrayList<String>());
++		expectedTablesForSchema.get(SCHEMA1_NAME).add(TABLE1_NAME);
++		expectedColumnsForTable.put(TABLE1_NAME, new ArrayList<String>());
++		expectedColumnsForTable.get(TABLE1_NAME).addAll(Arrays.asList(new String[] { "EMPNO", "FIRSTNME", "MIDINIT", "LASTNAME",
++				"WORKDEPT", "PHONENO", "HIREDATE", "JOB", "EDLEVEL", "SEX", "BIRTHDATE", "SALARY", "BONUS", "COMM" }));
++		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
++
++		// Add another table to an existing schema in an existing database
++		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE2_NAME);
++
++		expectedTablesForSchema.get(SCHEMA1_NAME).add(TABLE2_NAME);
++		expectedColumnsForTable.put(TABLE2_NAME, new ArrayList<String>());
++		expectedColumnsForTable.get(TABLE2_NAME).addAll(Arrays.asList(new String[] { "EMPNO", "FIRSTNME", "MIDINIT", "LASTNAME" }));
++		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
++
++		// Add another schema and table to an existing database
++		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA2_NAME, TABLE1_NAME);
++
++		expectedSchemasForDatabase.get(DATABASE1_NAME).add(SCHEMA2_NAME);
++		expectedTablesForSchema.put(SCHEMA2_NAME, new ArrayList<String>());
++		expectedTablesForSchema.get(SCHEMA2_NAME).add(TABLE1_NAME);
++		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
++
++		// Import TABLE2_NAME again from SOURCE_DB2 where it has more columns than in SOURCE_DB1
++		runTestImport(ams, SOURCE_DB2, DATABASE1_NAME, SCHEMA1_NAME, TABLE2_NAME);
++
++		// Validate that additional columns have been added to the existing table object TABLE2_NAME.
++		expectedColumnsForTable.get(TABLE2_NAME).addAll(Arrays.asList(new String[] { "WORKDEPT", "PHONENO", "HIREDATE", "JOB", "EDLEVEL", "SEX", "BIRTHDATE", "SALARY", "BONUS", "COMM" }));
++		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
++	}
++
++	private void validateImportedObjects(MetadataStore mds, List<String> expectedDatabases, HashMap<String, List<String>> expectedSchemasForDatabase, HashMap<String,
++			List<String>> expectedTablesForSchema, HashMap<String, List<String>> expectedColumnsForTable) throws Exception {
++		for (String dbName : expectedDatabases) {
++			String query = mds.newQueryBuilder().objectType("Database").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, dbName).build();
++			List<MetaDataObjectReference> dbs = mds.search(query);
++			Assert.assertEquals("Number of databases does not match expected value.", 1, dbs.size());
++			Database database = (Database) mds.retrieve(dbs.get(0));
++			logger.log(Level.INFO, MessageFormat.format("Reference ''{0}''.", JSONUtils.toJSON(database)));
++			int numberOfMatchingConnections = 0;
++			for (org.apache.atlas.odf.api.metadata.models.Connection con : mds.getConnections(database)) {
++				if (getConnectionUrl(database.getName()).equals(((JDBCConnection) mds.retrieve(con.getReference())).getJdbcConnectionString())) {
++					numberOfMatchingConnections++;
++				}
++			}
++			Assert.assertEquals("Number of matching JDBC connections does not match expected value.", 1, numberOfMatchingConnections);
++			List<String> actualSchemaNames = new ArrayList<String>();
++			for (Schema schema : mds.getSchemas(database)) {
++				actualSchemaNames.add(schema.getName());
++
++				List<String> actualTableNames = new ArrayList<String>();
++				for (Table table : mds.getTables(schema)) {
++					actualTableNames.add(table.getName());
++
++					List<String> actualColumnNames = new ArrayList<String>();
++					for (Column column : mds.getColumns(table)) {
++						actualColumnNames.add(column.getName());
++					}
++					Assert.assertTrue("Expected columns are missing from metadata store.", actualColumnNames.containsAll(expectedColumnsForTable.get(table.getName())));
++					Assert.assertTrue("Importer has not imported all expected columns.", expectedColumnsForTable.get(table.getName()).containsAll(actualColumnNames));
++				}
++				Assert.assertTrue("Expected tables are missing from metadata store.", actualTableNames.containsAll(expectedTablesForSchema.get(schema.getName())));
++				Assert.assertTrue("Importer has not imported all expected tables.", expectedTablesForSchema.get(schema.getName()).containsAll(actualTableNames));
++			}
++			Assert.assertTrue("Expected schemas are missing from metadata store.", actualSchemaNames.containsAll(expectedSchemasForDatabase.get(database.getName())));
++			Assert.assertTrue("Importer has not imported all expected schemas.", expectedSchemasForDatabase.get(database.getName()).containsAll(actualSchemaNames));
++		}
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java
+new file mode 100755
+index 0000000..ec0aa9a
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java
+@@ -0,0 +1,243 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.metadata.internal.spark;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
++import org.apache.atlas.odf.api.settings.SparkConfig;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++
++public class SparkDiscoveryServiceLocalTest extends ODFTestBase {
++	protected static Logger logger = Logger.getLogger(SparkDiscoveryServiceLocalTest.class.getName());
++	public static int WAIT_MS_BETWEEN_POLLING = 2000;
++	public static int MAX_NUMBER_OF_POLLS = 400;
++	public static String DISCOVERY_SERVICE_ID = "spark-summary-statistics-example-service";
++	public static String DASHDB_DB = "BLUDB";
++	public static String DASHDB_SCHEMA = "SAMPLES";
++	public static String DASHDB_TABLE = "CUST_RETENTION_LIFE_DURATION";
++	public static enum DATASET_TYPE {
++		FILE, TABLE
++	}
++
++	@BeforeClass
++	public static void createSampleData() throws Exception {
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		if (mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).size() == 0) {
++			mds.createSampleData();
++		}
++	}
++
++	public static SparkConfig getLocalSparkConfig() {
++		SparkConfig config = new SparkConfig();
++		config.setClusterMasterUrl("local");
++		return config;
++	}
++
++	public static DiscoveryServiceProperties getSparkSummaryStatisticsService() throws JSONException {
++		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
++		dsProperties.setId(DISCOVERY_SERVICE_ID);
++		dsProperties.setName("Spark summary statistics service");
++		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
++		dsProperties.setCustomDescription("");
++		dsProperties.setIconUrl("spark.png");
++		dsProperties.setLink("http://www.spark.apache.org");
++		dsProperties.setPrerequisiteAnnotationTypes(null);
++		dsProperties.setResultingAnnotationTypes(null);
++		dsProperties.setSupportedObjectTypes(null);
++		dsProperties.setAssignedObjectTypes(null);
++		dsProperties.setAssignedObjectCandidates(null);
++		dsProperties.setParallelismCount(2);
++		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
++		endpoint.setJar("META-INF/spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
++		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
++		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
++		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
++		return dsProperties;
++	}
++
++	public static DiscoveryServiceProperties getSparkDiscoveryServiceExample() throws JSONException {
++		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
++		dsProperties.setId(DISCOVERY_SERVICE_ID);
++		dsProperties.setName("Spark summary statistics service");
++		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
++		dsProperties.setCustomDescription("");
++		dsProperties.setIconUrl("spark.png");
++		dsProperties.setLink("http://www.spark.apache.org");
++		dsProperties.setPrerequisiteAnnotationTypes(null);
++		dsProperties.setResultingAnnotationTypes(null);
++		dsProperties.setSupportedObjectTypes(null);
++		dsProperties.setAssignedObjectTypes(null);
++		dsProperties.setAssignedObjectCandidates(null);
++		dsProperties.setParallelismCount(2);
++		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
++		endpoint.setJar("META-INF/spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
++		endpoint.setClassName("org.apache.atlas.odf.core.spark.SparkDiscoveryServiceExample");
++		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.Generic);
++		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
++		return dsProperties;
++	}
++
++	public static DataFile getTestDataFile(MetadataStore mds) {
++		DataFile dataSet = null;
++		List<MetaDataObjectReference> refs = mds.search(mds.newQueryBuilder().objectType("DataFile").build());
++		for (MetaDataObjectReference ref : refs) {
++			DataFile file = (DataFile) mds.retrieve(ref);
++			if (file.getName().equals("BankClientsShort")) {
++				dataSet = file;
++				break;
++			}
++		}
++		Assert.assertNotNull(dataSet);
++		logger.log(Level.INFO, "Testing Spark discovery service on metadata object {0} (ref: {1})", new Object[] { dataSet.getName(), dataSet.getReference() });
++		return dataSet;
++	}
++
++	public static Table getTestTable(MetadataStore mds) {
++		Table dataSet = null;
++		List<MetaDataObjectReference> refs = mds.search(mds.newQueryBuilder().objectType("Table").build());
++		for (MetaDataObjectReference ref : refs) {
++			Table table = (Table) mds.retrieve(ref);
++			if (table.getName().equals(DASHDB_TABLE)) {
++				dataSet = table;
++				break;
++			}
++		}
++		Assert.assertNotNull(dataSet);
++		logger.log(Level.INFO, "Testing Spark discovery service on metadata object {0} (ref: {1})", new Object[] { dataSet.getName(), dataSet.getReference() });
++		return dataSet;
++	}
++
++	public static AnalysisRequest getSparkAnalysisRequest(DataSet dataSet) {
++		AnalysisRequest request = new AnalysisRequest();
++		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
++		dataSetRefs.add(dataSet.getReference());
++		request.setDataSets(dataSetRefs);
++		List<String> serviceIds = Arrays.asList(new String[]{DISCOVERY_SERVICE_ID});
++		request.setDiscoveryServiceSequence(serviceIds);
++		return request;
++	}
++
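++	// Registers the given discovery service, runs an analysis request against the selected data set type,
++	// and polls until the request finishes. The annotationNames parameter names the annotation types the
++	// service is expected to produce (not currently checked by this method).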
++	public void runSparkServiceTest(SparkConfig sparkConfig, DATASET_TYPE dataSetType, DiscoveryServiceProperties regInfo, String[] annotationNames) throws Exception {
++		logger.info("Using Spark configuration: " + JSONUtils.toJSON(sparkConfig));
++		SettingsManager config = new ODFFactory().create().getSettingsManager();
++		ODFSettings settings = config.getODFSettings();
++		settings.setSparkConfig(sparkConfig);
++		config.updateODFSettings(settings);
++
++		logger.info("Using discovery service: " + JSONUtils.toJSON(regInfo));
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++
++		try {
++			discoveryServicesManager.deleteDiscoveryService(DISCOVERY_SERVICE_ID);
++		} catch(ServiceNotFoundException e) {
++			// Ignore exception because service may not exist
++		}
++		discoveryServicesManager.createDiscoveryService(regInfo);
++
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		Assert.assertNotNull(mds);
++		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++		Assert.assertNotNull(as);
++
++		RelationalDataSet dataSet = null;
++		if (dataSetType == DATASET_TYPE.FILE) {
++			dataSet = getTestDataFile(mds);
++		} else if (dataSetType == DATASET_TYPE.TABLE) {
++			dataSet = getTestTable(mds);
++		} else {
++			Assert.fail();
++		}
++
++		logger.info("Using dataset: " + JSONUtils.toJSON(dataSet));
++
++		AnalysisRequest request = getSparkAnalysisRequest(dataSet);
++		logger.info("Using analysis request: " + JSONUtils.toJSON(request));
++
++		logger.info("Starting analysis...");
++		AnalysisResponse response = analysisManager.runAnalysis(request);
++		Assert.assertNotNull(response);
++		String requestId = response.getId();
++		Assert.assertNotNull(requestId);
++		logger.info("Request id is " + requestId + ".");
++
++		logger.info("Waiting for request to finish");
++		AnalysisRequestStatus status = null;
++		int maxPolls = MAX_NUMBER_OF_POLLS;
++		do {
++			status = analysisManager.getAnalysisRequestStatus(requestId);
++			logger.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
++			maxPolls--;
++			try {
++				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++			} catch (InterruptedException e) {
++				logger.log(Level.INFO, "Exception thrown: ", e);
++			}
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++		if (maxPolls == 0) {
++			logger.log(Level.INFO, "Request ''{0}'' is not finished yet, don't wait for it", requestId);
++		}
++		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, status.getState());
++
++		List<Annotation> annots = as.getAnnotations(null, status.getRequest().getId());
++		logger.info("Number of annotations created: " + annots.size());
++		Assert.assertTrue("No annotations have been created.", annots.size() > 0);
++
++		logger.log(Level.INFO, "Request ''{0}'' is finished.", requestId);
++
++		discoveryServicesManager.deleteDiscoveryService(DISCOVERY_SERVICE_ID);
++	}
++
++	@Test
++	public void testLocalSparkClusterWithLocalDataFile() throws Exception {
++		runSparkServiceTest(getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkSummaryStatisticsService(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
++	}
++
++	@Test
++	public void testLocalSparkClusterWithLocalDataFileAndDiscoveryServiceRequest() throws Exception {
++		runSparkServiceTest(getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkDiscoveryServiceExample(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java
+new file mode 100755
+index 0000000..4168b0e
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java
+@@ -0,0 +1,54 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.integrationtest.metadata.models;
++
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.integrationtest.metadata.MetadataStoreTestBase;
++import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
++import org.apache.atlas.odf.api.metadata.models.Database;
++import org.apache.atlas.odf.api.metadata.models.Table;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++
++public class CachedMetadataStoreTest extends TimerTestBase {
++	protected static Logger logger = ODFTestLogger.get();
++
++	@Test
++	public void testMetaDataCache() throws Exception {
++		// Note that only a subset of the metadata store test cases is used here because the MetaDataCache does not support queries
++		WritableMetadataStore mds = MetadataStoreTestBase.getWritableMetadataStore();
++		mds.resetAllData();
++		mds.createSampleData();
++		MetadataStoreTestBase.createAdditionalTestData(mds);
++	
++		Database database = MetadataStoreTestBase.getDatabaseTestObject(mds);
++		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, database)), database);
++
++		Table table = MetadataStoreTestBase.getTableTestObject(mds);
++		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, table)), table); 
++
++		DataFileFolder folder = MetadataStoreTestBase.getDataFileFolderTestObject(mds);
++		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, folder)), folder);
++
++		DataFile file = MetadataStoreTestBase.getDataFileTestObject(mds);
++		MetadataStoreTestBase.checkReferences(mds, file);
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java
+new file mode 100755
+index 0000000..75d41c5
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java
+@@ -0,0 +1,58 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import static org.junit.Assert.assertNotNull;
++
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.notification.NotificationManager;
++
++public class ODFInternalFactoryTest extends TimerTestBase {
++
++	Logger logger = ODFTestLogger.get();
++
++	@Test
++	public void testFactoryInstantiations() throws Exception {
++		try {
++			ODFInternalFactory factory = new ODFInternalFactory();
++			Class<?>[] interfaces = new Class<?>[] { //
++			DiscoveryServiceQueueManager.class, //
++					ControlCenter.class, //
++					AnalysisRequestTrackerStore.class, //
++					ThreadManager.class, //
++					ExecutorServiceFactory.class, //
++					NotificationManager.class, //
++					DiscoveryServiceQueueManager.class, //
++			};
++			for (Class<?> cl : interfaces) {
++				Object o = factory.create(cl);
++				assertNotNull(o);
++				logger.info("Object created for class " + cl.getName() + ": " + o.getClass().getName());
++			}
++		} catch (Exception e) {
++			e.printStackTrace();
++			throw e;
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java
+new file mode 100755
+index 0000000..867f0a9
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java
+@@ -0,0 +1,67 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.engine.SystemHealth;
++import org.junit.After;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.engine.EngineManager;
++
++/**
++ * All JUnit test cases that require proper Kafka setup should inherit from this class.
++ *
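++ * A minimal usage sketch (class and method names are illustrative):
++ * <pre>
++ * public class MyKafkaDependentTest extends ODFTestBase {
++ *     // @Test methods of subclasses run against the Kafka setup started by ODFTestBase
++ * }
++ * </pre>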
++ */
++public class ODFTestBase extends TimerTestBase {
++
++	protected static Logger log = ODFTestLogger.get();
++
++	@Test
++	public void testHealth() {
++		testHealth(true);
++	}
++
++	private void testHealth(boolean kafkaRunning) {
++		log.info("Starting health check...");
++		EngineManager engineManager = new ODFFactory().create().getEngineManager();
++		SystemHealth health = engineManager.checkHealthStatus();
++		if (!kafkaRunning) {
++			Assert.assertEquals(SystemHealth.HealthStatus.ERROR, health.getStatus());
++		} else {
++			Assert.assertEquals(SystemHealth.HealthStatus.OK, health.getStatus());
++		}
++		log.info("Health check finished");
++	}
++
++	@BeforeClass
++	public static void startup() throws Exception {
++		TestEnvironment.startAll();
++	}
++
++	@Before
++	public void setup() throws Exception {
++		testHealth(true);
++	}
++
++	@After
++	public void tearDown() throws Exception {
++		testHealth(true);
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java
+new file mode 100755
+index 0000000..a845157
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java
+@@ -0,0 +1,24 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import java.util.logging.Logger;
++
++public class ODFTestLogger {
++	
++	public static Logger get() {
++		return Logger.getLogger(ODFTestLogger.class.getName());
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java
+new file mode 100755
+index 0000000..525dc83
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java
+@@ -0,0 +1,27 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import org.junit.BeforeClass;
++
++import org.apache.atlas.odf.api.ODFFactory;
++
++public class ODFTestcase extends TimerTestBase {
++	@BeforeClass
++	public static void setupBeforeClass() {
++		TestEnvironment.startAll();
++		// Initialize analysis manager
++		new ODFFactory().create().getAnalysisManager();
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java
+new file mode 100755
+index 0000000..06d407e
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java
+@@ -0,0 +1,67 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInitializer;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++
++/**
++ * The class can be used to start components required for testing.
++ *
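++ * Typical use in a test class (a sketch; startAll() also runs the ODF initializer):
++ * <pre>
++ * &#64;BeforeClass
++ * public static void startup() {
++ *     TestEnvironment.startAll();
++ * }
++ * </pre>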
++ */
++public class TestEnvironment {
++
++	static Logger logger = Logger.getLogger(TestEnvironment.class.getName());
++
++	public static String MESSAGING_CLASS = "org.apache.atlas.odf.core.test.messaging.kafka.TestEnvironmentMessagingInitializer";
++
++	public static <T> T createObject(String className, Class<T> clazz) {
++		ClassLoader cl = TestEnvironment.class.getClassLoader();
++		try {
++			Class<?> tei = cl.loadClass(className);
++			return (T) tei.newInstance();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred when instantiating test environment class " + className, exc);
++		}
++		return null;
++	}
++
++	public static void start(String className) {
++		TestEnvironmentInitializer initializer = createObject(className, TestEnvironmentInitializer.class);
++		if (initializer != null) {
++			initializer.start();
++		}
++	}
++
++	public static void startMessaging() {
++		if ("true".equals(new ODFInternalFactory().create(Environment.class).getProperty("odf.dont.start.messaging"))) {
++			// do nothing
++			logger.info("Messaging test environment not started because environment variable odf.dont.start.messaging is set");
++		} else {
++			start(MESSAGING_CLASS);
++		}
++	}
++
++	public static void startAll() {
++		startMessaging();
++		ODFInitializer.start();
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java
+new file mode 100755
+index 0000000..b4a0022
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java
+@@ -0,0 +1,22 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++public interface TestEnvironmentInitializer {
++	void start();
++	
++	void stop();
++	
++	String getName();
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java
+new file mode 100755
+index 0000000..68740e4
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java
+@@ -0,0 +1,87 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test;
++
++import java.io.File;
++import java.io.IOException;
++import java.nio.charset.StandardCharsets;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.Map;
++import java.util.Map.Entry;
++import java.util.Set;
++import java.util.logging.Logger;
++
++import org.apache.wink.json4j.JSONException;
++import org.junit.AfterClass;
++import org.junit.Rule;
++import org.junit.rules.Stopwatch;
++import org.junit.runner.Description;
++
++import com.google.common.io.Files;
++
++public class TimerTestBase {
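++	// The execution log is a CSV file; each row is <testMethod>,<durationMillis>,<testClass>,<projectName> (see tearDownAndLogTimes below).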
++	static final String logFilePath = "/tmp/odf-test-execution-log.csv";
++	static Map<String, HashMap<String, Long>> testTimeMap = new HashMap<String, HashMap<String, Long>>();
++	final static Logger logger = ODFTestLogger.get();
++
++	@Rule
++	public Stopwatch timeWatcher = new Stopwatch() {
++		@Override
++		protected void finished(long nanos, Description description) {
++			HashMap<String, Long> testMap = testTimeMap.get(description.getClassName());
++			if (testMap == null) {
++				testMap = new HashMap<String, Long>();
++				testTimeMap.put(description.getClassName(), testMap);
++			}
++			testMap.put(description.getMethodName(), (nanos / 1000 / 1000));
++		}
++	};
++
++	@AfterClass
++	public static void tearDownAndLogTimes() throws JSONException {
++		try {
++			File logFile = new File(logFilePath);
++			Set<String> uniqueRows = new HashSet<String>();
++			if (logFile.exists()) {
++				uniqueRows = new HashSet<String>(Files.readLines(logFile, StandardCharsets.UTF_8));
++			}
++
++			for (Entry<String, HashMap<String, Long>> entry : testTimeMap.entrySet()) {
++				for (Entry<String, Long> testEntry : entry.getValue().entrySet()) {
++					String logRow = new StringBuilder().append(testEntry.getKey()).append(",").append(testEntry.getValue()).append(",").append(entry.getKey()).append(",")
++							.append(System.getProperty("odf.build.project.name", "ProjectNameNotDefined")).toString();
++					uniqueRows.add(logRow);
++				}
++			}
++
++			StringBuilder logContent = new StringBuilder();
++			Iterator<String> rowIterator = uniqueRows.iterator();
++			while (rowIterator.hasNext()) {
++				logContent.append(rowIterator.next());
++				if (rowIterator.hasNext()) {
++					logContent.append("\n");
++				}
++			}
++
++			logger.info("Total time consumed by succeeded tests:\n" + logContent.toString());
++			logFile.createNewFile();
++			Files.write(logContent.toString().getBytes("UTF-8"), logFile);
++		} catch (IOException e) {
++			logger.warning("Error writing test execution log");
++			e.printStackTrace();
++		}
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java
+new file mode 100755
+index 0000000..7a1f0ed
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java
+@@ -0,0 +1,114 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.annotation;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Test;
++
++import com.fasterxml.jackson.core.Version;
++import com.fasterxml.jackson.databind.ObjectMapper;
++import com.fasterxml.jackson.databind.module.SimpleModule;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++import org.apache.atlas.odf.json.AnnotationDeserializer;
++import org.apache.atlas.odf.json.AnnotationSerializer;
++
++public class AnnotationExtensionTest extends TimerTestBase {
++
++	static Logger logger = ODFTestLogger.get();
++
++	public static <T> T readJSONObjectFromFileInClasspath(ObjectMapper om, Class<T> cl, String pathToFile, ClassLoader classLoader) {
++		if (classLoader == null) {
++			// use current classloader if not provided
++			classLoader = AnnotationExtensionTest.class.getClassLoader();
++		}
++		InputStream is = classLoader.getResourceAsStream(pathToFile);
++		T result = null;
++		try {
++			result = om.readValue(is, cl);
++		} catch (IOException e) {
++			// assume that this is a severe error since the provided JSONs should be correct
++			throw new RuntimeException(e);
++		}
++
++		return result;
++	}
++
++	@Test
++	public void testWithUtils() throws Exception {
++		testSimple(JSONUtils.getGlobalObjectMapper());
++	}
++
++	@Test
++	public void testWithSeparateObjectMapper() throws Exception {
++		ObjectMapper om = new ObjectMapper();
++		SimpleModule mod = new SimpleModule("annotation module", Version.unknownVersion());
++		mod.addDeserializer(Annotation.class, new AnnotationDeserializer());
++		mod.addSerializer(Annotation.class, new AnnotationSerializer());
++		om.registerModule(mod);
++		testSimple(om);
++	}
++
++	private void testSimple(ObjectMapper om) throws Exception {
++		ExtensionTestAnnotation newTestAnnot = new ExtensionTestAnnotation();
++		String strValue = "newstring1";
++		int intValue = 4237;
++		newTestAnnot.setNewStringProp1(strValue);
++		newTestAnnot.setNewIntProp2(intValue);
++//		String newTestAnnotJSON = om.writeValueAsString(newTestAnnot);
++		String newTestAnnotJSON = JSONUtils.toJSON(newTestAnnot).toString();
++		logger.info("New test annot JSON: " + newTestAnnotJSON);
++
++		logger.info("Deserializing with " + Annotation.class.getSimpleName() + "class as target class");
++		Annotation annot1 = om.readValue(newTestAnnotJSON, Annotation.class);
++		Assert.assertNotNull(annot1);
++		logger.info("Deserialized annotation JSON (target: " + Annotation.class.getSimpleName() + "): " + om.writeValueAsString(annot1));
++		logger.info("Deserialized annotation class (target: " + Annotation.class.getSimpleName() + "): " + annot1.getClass().getName());
++		Assert.assertEquals(ExtensionTestAnnotation.class, annot1.getClass());
++		ExtensionTestAnnotation extAnnot1 = (ExtensionTestAnnotation) annot1;
++		Assert.assertEquals(strValue, extAnnot1.getNewStringProp1());
++		Assert.assertEquals(intValue, extAnnot1.getNewIntProp2());
++
++		/* This does not make sense because you would never use ExtensionTestAnnotation.class as the deserialization target:
++		 * that would enforce the standard bean deserializer (no custom deserializer is registered for this specific class), so jsonProperties could not be mapped.
++		logger.info("Calling deserialization with " + ExtensionTestAnnotation.class.getSimpleName() + " as target");
++		ExtensionTestAnnotation annot2 = om.readValue(newTestAnnotJSON, ExtensionTestAnnotation.class);
++		Assert.assertNotNull(annot2);
++		logger.info("Deserialized annotation JSON (target: " + ExtensionTestAnnotation.class.getSimpleName() + "): " + om.writeValueAsString(annot2));
++		logger.info("Deserialized annotation class (target: " + ExtensionTestAnnotation.class.getSimpleName() + "): " + annot2.getClass().getName());
++		Assert.assertEquals(ExtensionTestAnnotation.class, annot2.getClass());
++		String s = annot2.getNewStringProp1();
++		Assert.assertEquals(strValue, annot2.getNewStringProp1());
++		Assert.assertEquals(intValue, annot2.getNewIntProp2()); */
++
++		logger.info("Processing profiling annotation...");
++		Annotation unknownAnnot = readJSONObjectFromFileInClasspath(om, Annotation.class, "org/apache/atlas/odf/core/test/annotation/annotexttest1.json", null);
++		Assert.assertNotNull(unknownAnnot);
++		logger.info("Read Unknown annotation: " + unknownAnnot.getClass().getName());
++		Assert.assertEquals(ProfilingAnnotation.class, unknownAnnot.getClass());
++
++		logger.info("Read profiling annotation: " + om.writeValueAsString(unknownAnnot));
++		JSONObject jsonPropertiesObj = new JSONObject(unknownAnnot.getJsonProperties());
++		Assert.assertEquals("newProp1Value", jsonPropertiesObj.get("newProp1"));
++		Assert.assertEquals((Integer) 4237, jsonPropertiesObj.get("newProp2"));
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java
+new file mode 100755
+index 0000000..b65ce17
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java
+@@ -0,0 +1,62 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.annotation;
++
++import java.util.List;
++import java.util.UUID;
++
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++
++public class AnnotationStoreTest extends ODFTestcase {
++
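++	// DefaultStatusQueueStore is the core AnnotationStore implementation used in these tests.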
++	private AnnotationStore createAnnotationStore() {
++		return new DefaultStatusQueueStore();
++	}
++	
++	@Test
++	public void testStoreProfilingAnnotation() throws Exception {
++		AnnotationStore as = createAnnotationStore();
++		
++		String modRef1Id = UUID.randomUUID().toString();
++		MetaDataObjectReference mdoref1 = new MetaDataObjectReference();
++		mdoref1.setId(modRef1Id);
++		
++		ProfilingAnnotation annot1 = new ProfilingAnnotation();
++		annot1.setJsonProperties("{\"a\": \"b\"}");
++		annot1.setAnnotationType("AnnotType1");
++		annot1.setProfiledObject(mdoref1);
++
++		MetaDataObjectReference annot1Ref = as.store(annot1);
++		Assert.assertNotNull(annot1Ref.getId());
++		List<Annotation> retrievedAnnots = as.getAnnotations(mdoref1, null);
++		Assert.assertEquals(1, retrievedAnnots.size());
++		
++		Annotation retrievedAnnot = retrievedAnnots.get(0);
++		Assert.assertTrue(annot1 != retrievedAnnot);
++		Assert.assertTrue(retrievedAnnot instanceof ProfilingAnnotation);
++		ProfilingAnnotation retrievedProfilingAnnotation = (ProfilingAnnotation) retrievedAnnot;
++		Assert.assertEquals(modRef1Id, retrievedProfilingAnnotation.getProfiledObject().getId());
++		Assert.assertEquals(annot1Ref, retrievedAnnot.getReference());
++		
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java
+new file mode 100755
+index 0000000..cd8f695
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java
+@@ -0,0 +1,39 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.annotation;
++
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++
++class ExtensionTestAnnotation extends ProfilingAnnotation {
++
++	private String newStringProp1;
++	private int newIntProp2;
++
++	public String getNewStringProp1() {
++		return newStringProp1;
++	}
++
++	public void setNewStringProp1(String newStringProp1) {
++		this.newStringProp1 = newStringProp1;
++	}
++
++	public int getNewIntProp2() {
++		return newIntProp2;
++	}
++
++	public void setNewIntProp2(int newIntProp2) {
++		this.newIntProp2 = newIntProp2;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java
+new file mode 100755
+index 0000000..f65e3ad
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java
+@@ -0,0 +1,147 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.annotation;
++
++import java.util.ArrayList;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++public class TestSyncDiscoveryServiceWritingExtendedAnnotations extends DiscoveryServiceBase implements SyncDiscoveryService {
++	Logger logger = ODFTestLogger.get();
++
++	public static class SyncDiscoveryServiceAnnotation extends ProfilingAnnotation {
++		private String prop1 = "";
++		private int prop2 = 4237;
++		private MyObject prop3 = new MyObject();
++
++		public String getProp1() {
++			return prop1;
++		}
++
++		public void setProp1(String prop1) {
++			this.prop1 = prop1;
++		}
++
++		public int getProp2() {
++			return prop2;
++		}
++
++		public void setProp2(int prop2) {
++			this.prop2 = prop2;
++		}
++
++		public MyObject getProp3() {
++			return prop3;
++		}
++
++		public void setProp3(MyObject prop3) {
++			this.prop3 = prop3;
++		}
++
++	}
++
++	public static class MyObject {
++		private String anotherProp = "";
++
++		public String getAnotherProp() {
++			return anotherProp;
++		}
++
++		public void setAnotherProp(String anotherProp) {
++			this.anotherProp = anotherProp;
++		}
++
++		private MyOtherObject yetAnotherProp = new MyOtherObject();
++
++		public MyOtherObject getYetAnotherProp() {
++			return yetAnotherProp;
++		}
++
++		public void setYetAnotherProp(MyOtherObject yetAnotherProp) {
++			this.yetAnotherProp = yetAnotherProp;
++		}
++
++	}
++
++	public static class MyOtherObject {
++		private String myOtherObjectProperty = "";
++
++		public String getMyOtherObjectProperty() {
++			return myOtherObjectProperty;
++		}
++
++		public void setMyOtherObjectProperty(String myOtherObjectProperty) {
++			this.myOtherObjectProperty = myOtherObjectProperty;
++		}
++
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		try {
++			MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
++
++			List<Annotation> annotations = new ArrayList<>();
++			SyncDiscoveryServiceAnnotation annotation1 = new SyncDiscoveryServiceAnnotation();
++			String annotation1_prop1 = "prop1_1_" + dataSetRef.getUrl();
++			annotation1.setProp1(annotation1_prop1);
++			annotation1.setProp2(annotation1_prop1.hashCode());
++			annotation1.setProfiledObject(dataSetRef);
++			MyObject mo1 = new MyObject();
++			MyOtherObject moo1 = new MyOtherObject();
++			moo1.setMyOtherObjectProperty("nestedtwolevels" + annotation1_prop1);
++			mo1.setYetAnotherProp(moo1);
++			mo1.setAnotherProp("nested" + annotation1_prop1);
++			annotation1.setProp3(mo1);
++			annotations.add(annotation1);
++
++			SyncDiscoveryServiceAnnotation annotation2 = new SyncDiscoveryServiceAnnotation();
++			String annotation2_prop1 = "prop1_2_" + dataSetRef.getUrl();
++			annotation2.setProp1(annotation2_prop1);
++			annotation2.setProp2(annotation2_prop1.hashCode());
++			annotation2.setProfiledObject(dataSetRef);
++			MyObject mo2 = new MyObject();
++			MyOtherObject moo2 = new MyOtherObject();
++			moo2.setMyOtherObjectProperty("nestedtwolevels" + annotation2_prop1);
++			mo2.setYetAnotherProp(moo2);
++			mo2.setAnotherProp("nested" + annotation2_prop1);
++			annotation2.setProp3(mo2);
++			annotations.add(annotation2);
++
++			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
++			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++			DiscoveryServiceResult dsResult = new DiscoveryServiceResult();
++			dsResult.setAnnotations(annotations);
++			resp.setResult(dsResult);
++			resp.setDetails(this.getClass().getName() + ".runAnalysis finished OK");
++
++			logger.info("Returning from discovery service " + this.getClass().getSimpleName() + " with result: " + JSONUtils.toJSON(resp));
++			return resp;
++		} catch (Exception exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java
+new file mode 100755
+index 0000000..91b544c
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java
+@@ -0,0 +1,63 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.annotation;
++
++import java.util.ArrayList;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++public class TestSyncDiscoveryServiceWritingJsonAnnotations extends DiscoveryServiceBase implements SyncDiscoveryService {
++	Logger logger = ODFTestLogger.get();
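++	// The annotation payload is loaded once from a nested JSON example on the classpath
++	// and written verbatim into the ProfilingAnnotation's jsonProperties in runAnalysis().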
++	private String annotationResult = Utils.getInputStreamAsString(this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json"), "UTF-8");
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		try {
++			MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
++
++			List<Annotation> annotations = new ArrayList<>();
++			ProfilingAnnotation annotation1 = new ProfilingAnnotation();
++			annotation1.setProfiledObject(dataSetRef);
++			annotation1.setJsonProperties(annotationResult);
++			annotation1.setAnnotationType("JsonAnnotationWriteTest");
++			annotation1.setJavaClass("JsonAnnotationWriteTest");
++			annotations.add(annotation1);
++
++			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
++			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++			DiscoveryServiceResult dsResult = new DiscoveryServiceResult();
++			dsResult.setAnnotations(annotations);
++			resp.setResult(dsResult);
++			resp.setDetails(this.getClass().getName() + ".runAnalysis finished OK");
++
++			logger.info("Returning from discovery service " + this.getClass().getSimpleName() + " with result: " + JSONUtils.toJSON(resp));
++			return resp;
++		} catch (Exception exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java
+new file mode 100755
+index 0000000..b1d2518
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java
+@@ -0,0 +1,165 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.configuration;
++
++import java.io.IOException;
++import java.io.InputStream;
++import java.util.Map;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.configuration.ConfigManager;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import com.fasterxml.jackson.core.JsonParseException;
++import com.fasterxml.jackson.databind.JsonMappingException;
++import com.fasterxml.jackson.databind.ObjectMapper;
++
++/**
++ * This test uses a mocked storage; therefore, no ZooKeeper instance is required.
++ */
++public class ODFConfigurationTest extends ODFTestcase {
++
++	Logger logger = ODFTestLogger.get();
++
++	@Before
++	public void setupDefaultConfig() throws JsonParseException, JsonMappingException, IOException, ValidationException, JSONException {
++		logger.info("reset config to default");
++		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
++		ConfigContainer defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
++		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
++		configManager.updateConfigContainer(defaultConfig);
++	}
++
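++	// Merging a partial config over the defaults must override testProp while leaving testProp2 untouched.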
++	@Test
++	public void testUserDefinedMerge() throws JsonParseException, JsonMappingException, IOException {
++		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
++		ConfigContainer defaultConfig;
++		defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
++		// Set test properties to default values; the merge should overwrite only testProp
++		defaultConfig.getOdf().getUserDefined().put("testProp", "defaultValue");
++		defaultConfig.getOdf().getUserDefined().put("testProp2", "defaultValue");
++		logger.info("Read config: " + defaultConfig);
++
++		// Config example that sets the user-defined property testProp to 123
++		String value = "{\r\n\t\"odf\" : {\r\n\t\"userDefined\" : {\r\n\t\t\"testProp\" : 123\r\n\t}\r\n}\r\n}\r\n";
++		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
++		Utils.mergeODFPOJOs(defaultConfig, props);
++		logger.info("Mergded config: " + defaultConfig);
++
++		Assert.assertEquals(123, defaultConfig.getOdf().getUserDefined().get("testProp"));
++		Assert.assertEquals("defaultValue", defaultConfig.getOdf().getUserDefined().get("testProp2"));
++	}
++
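++	// A negative discoveryServiceWatcherWaitMs is invalid and must make ConfigContainer.validate() throw.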
++	@Test
++	public void testValidation() throws JsonParseException, JsonMappingException, IOException {
++		boolean exceptionOccurred = false;
++		String value = "{\r\n\t\"odf\" : {\r\n\t\t\"discoveryServiceWatcherWaitMs\" : -5\r\n\t}\r\n}\r\n";
++		try {
++			ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
++			props.validate();
++		} catch (ValidationException e) {
++			exceptionOccurred = true;
++		}
++
++		Assert.assertTrue(exceptionOccurred);
++	}
++
++	@Test
++	public void testMerge() throws JsonParseException, JsonMappingException, IOException {
++		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
++		ConfigContainer defaultConfig;
++		defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
++		// Config example that sets the ODF discoveryServiceWatcherWaitMs property to 777
++		String value = "{\r\n\t\"odf\" : {\r\n\t\t\"discoveryServiceWatcherWaitMs\" : 777\r\n\t}\r\n}\r\n";
++		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
++		Utils.mergeODFPOJOs(defaultConfig, props);
++
++		// TODOCONFIG, move next line to kafka tests
++		// Assert.assertEquals(777, defaultConfig.getOdf().getQueueConsumerWaitMs().intValue());
++	}
++
++	@Test
++	public void testDeepMerge() throws JsonParseException, JsonMappingException, IOException {
++		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
++		ConfigContainer defaultConfig;
++		defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
++		// Config example that sets the Kafka consumer offsetsStorage property to "TEST"; all other kafkaConsumerConfig values must keep their defaults
++		String value = "{\r\n\t\"odf\" : {\r\n\"messagingConfiguration\": { \"type\": \"" + KafkaMessagingConfiguration.class.getName()
++				+ "\", \t\t\"kafkaConsumerConfig\" : { \r\n\t\t\t\"offsetsStorage\" : \"TEST\"\r\n\t\t}\r\n\t}\r\n}}\r\n";
++		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
++		Utils.mergeODFPOJOs(defaultConfig, props);
++
++		// TODOCONFIG
++		//		Assert.assertEquals("TEST", defaultConfig.getOdf().getKafkaConsumerConfig().getOffsetsStorage());
++		//make sure the rest is still default
++		//		Assert.assertEquals(400, defaultConfig.getOdf().getKafkaConsumerConfig().getZookeeperSessionTimeoutMs().intValue());
++	}
++
++	@Test
++	public void testGet() {
++		Assert.assertTrue(new ODFFactory().create().getSettingsManager().getODFSettings().isReuseRequests());
++	}
++
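++	// User-defined settings must round-trip numbers, strings, and nested JSON objects;
++	// a nested JSON object comes back from the settings API as a Map<String, Object>.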
++	@Test
++	public void testPut() throws InterruptedException, IOException, ValidationException, JSONException, ServiceNotFoundException {
++		SettingsManager config = new ODFFactory().create().getSettingsManager();
++		String propertyId = "my_dummy_test_property";
++		int testNumber = 123;
++		Map<String, Object> cont = config.getUserDefinedConfig();
++		cont.put(propertyId, testNumber);
++		config.updateUserDefined(cont);
++		Assert.assertEquals(testNumber, config.getUserDefinedConfig().get(propertyId));
++
++		String testString = "test";
++		cont.put(propertyId, testString);
++		config.updateUserDefined(cont);
++
++		Assert.assertEquals(testString, config.getUserDefinedConfig().get(propertyId));
++
++		JSONObject testJson = new JSONObject();
++		testJson.put("testProp", "test");
++		cont.put(propertyId, testJson);
++		config.updateUserDefined(cont);
++
++		Assert.assertEquals(testJson, config.getUserDefinedConfig().get(propertyId));
++
++		ODFSettings settings = config.getODFSettings();
++		logger.info("Last update object: " + JSONUtils.toJSON(settings));
++		Assert.assertNotNull(settings);
++		Assert.assertNotNull(settings.getUserDefined());
++		Assert.assertNotNull(settings.getUserDefined().get(propertyId));
++		logger.info("User defined object: " + settings.getUserDefined().get(propertyId).getClass());
++		@SuppressWarnings("unchecked")
++		Map<String, Object> notifiedNestedJSON = (Map<String, Object>) settings.getUserDefined().get(propertyId);
++		Assert.assertNotNull(notifiedNestedJSON.get("testProp"));
++		Assert.assertTrue(notifiedNestedJSON.get("testProp") instanceof String);
++		Assert.assertEquals("test", notifiedNestedJSON.get("testProp"));
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java
+new file mode 100755
+index 0000000..aea9a30
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java
+@@ -0,0 +1,83 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.configuration;
++
++import java.util.logging.Logger;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.api.settings.SparkConfig;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class PasswordEncryptionTest extends TimerTestBase {
++	Logger logger = ODFTestLogger.get();
++	private static final String SPARK_PASSWORD_CONFIG = "spark.authenticate.secret";
++
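++	// Password lifecycle under test: stored encrypted, returned as "***hidden***" by the
++	// hide-passwords view, and preserved when settings containing the placeholder are written back.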
++	@Test
++	public void testGeneralPasswordEncryption() throws Exception {
++		SettingsManager settings = new ODFFactory().create().getSettingsManager();
++		ODFSettings settingsWithPlainPasswords = settings.getODFSettingsHidePasswords();
++		settingsWithPlainPasswords.setOdfPassword("newOdfPassword");
++		logger.info("Settings with plain password: " + JSONUtils.toJSON(settingsWithPlainPasswords));
++		settings.updateODFSettings(settingsWithPlainPasswords);
++
++		ODFSettings settingsWithHiddenPasswords = settings.getODFSettingsHidePasswords();
++		String hiddenPasswordIdentifier = "***hidden***";
++		Assert.assertEquals(hiddenPasswordIdentifier, settingsWithHiddenPasswords.getOdfPassword());
++		logger.info("Settings with hidden password: " + JSONUtils.toJSON(settingsWithHiddenPasswords));
++
++		ODFSettings settingsWithEncryptedPassword = settings.getODFSettings();
++		Assert.assertEquals("newOdfPassword", Encryption.decryptText(settingsWithEncryptedPassword.getOdfPassword()));
++		logger.info("Settings with encrypted password: " + JSONUtils.toJSON(settingsWithEncryptedPassword));
++
++		// When overwriting settings with hidden passwords, encrypted passwords must be kept internally
++		settings.updateODFSettings(settingsWithHiddenPasswords);
++		settingsWithEncryptedPassword = settings.getODFSettings();
++		Assert.assertEquals("newOdfPassword", Encryption.decryptText(settingsWithEncryptedPassword.getOdfPassword()));
++	}
++
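++	// Same lifecycle as above, but for a password stored inside the Spark configuration map.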
++	@Test
++	public void testSparkConfigEncryption() throws Exception {
++		SettingsManager settings = new ODFFactory().create().getSettingsManager();
++		SparkConfig plainSparkConfig = new SparkConfig();
++		plainSparkConfig.setConfig(SPARK_PASSWORD_CONFIG, "plainConfigValue");
++		ODFSettings settingsWithPlainPasswords = settings.getODFSettings();
++		settingsWithPlainPasswords.setSparkConfig(plainSparkConfig);
++		logger.info("Settings with plain password: " + JSONUtils.toJSON(settingsWithPlainPasswords));
++		settings.updateODFSettings(settingsWithPlainPasswords);
++
++		ODFSettings settingsWithHiddenPasswords = settings.getODFSettingsHidePasswords();
++		String hiddenPasswordIdentifier = "***hidden***";
++		String hiddenConfigValue = (String) settingsWithHiddenPasswords.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
++		Assert.assertEquals(hiddenPasswordIdentifier, hiddenConfigValue);
++		logger.info("Config with hidden password: " + JSONUtils.toJSON(settingsWithHiddenPasswords));
++
++		ODFSettings settingsWithEncryptedPassword = settings.getODFSettings();
++		String encryptedConfigValue = (String) settingsWithEncryptedPassword.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
++		Assert.assertEquals("plainConfigValue", Encryption.decryptText(encryptedConfigValue));
++		logger.info("Config with encrypted password: " + JSONUtils.toJSON(settingsWithEncryptedPassword));
++
++		// When overwriting settings with hidden passwords, encrypted passwords must be kept internally
++		settings.updateODFSettings(settingsWithHiddenPasswords);
++		encryptedConfigValue = (String) settingsWithEncryptedPassword.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
++		Assert.assertEquals("plainConfigValue", Encryption.decryptText(encryptedConfigValue));
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java
+new file mode 100755
+index 0000000..3db5778
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java
+@@ -0,0 +1,103 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.configuration;
++
++import java.util.Collections;
++
++import org.apache.atlas.odf.api.settings.validation.EnumValidator;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.configuration.ConfigManager;
++import org.apache.atlas.odf.core.configuration.ServiceValidator;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.settings.validation.ImplementationValidator;
++import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
++import org.apache.atlas.odf.api.settings.validation.PropertyValidator;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++import org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryServiceWritingAnnotations1;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class ValidationTests extends TimerTestBase {
++
++	@Test
++	public void testEnum() {
++		String[] vals = new String[] { "test", "test2" };
++		String correct = "test";
++		String incorrect = "fail";
++
++		Assert.assertTrue(validateTest(correct, new EnumValidator(vals)));
++		Assert.assertFalse(validateTest(incorrect, new EnumValidator(vals)));
++	}
++
++	@Test
++	public void testImplementation() {
++		String correct = TestAsyncDiscoveryServiceWritingAnnotations1.class.getName();
++		String incorrect = "dummyClass";
++		Assert.assertTrue(validateTest(correct, new ImplementationValidator()));
++		Assert.assertFalse(validateTest(incorrect, new ImplementationValidator()));
++	}
++
++	@Test
++	public void testService() throws Exception {
++		String s = "{\r\n" + 
++				"			\"id\": \"asynctestservice\",\r\n" + 
++				"			\"name\": \"Async test\",\r\n" + 
++				"			\"description\": \"The async test service\",\r\n" + 
++				"			\"endpoint\": {\r\n" + 
++				"				\"runtimeName\": \"Java\",\r\n" + 
++				"				\"className\": \"TestAsyncDiscoveryService1\"\r\n" +
++				"			}\r\n" + 
++				"		}";
++		
++		DiscoveryServiceProperties newService = JSONUtils.fromJSON(s, DiscoveryServiceProperties.class);
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		//ODFConfig odfConfig = new ODFFactory().create(ODFConfiguration.class).getODFConfig();
++
++		ConfigContainer new1 = new ConfigContainer();
++		new1.setRegisteredServices(Collections.singletonList(newService));
++		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
++		configManager.updateConfigContainer(new1);
++		
++		DiscoveryServiceProperties correct = discoveryServicesManager.getDiscoveryServicesProperties().get(0);
++		Assert.assertEquals("asynctestservice", correct.getId());
++		correct.setId("newId");
++		DiscoveryServiceProperties incorrect = new DiscoveryServiceProperties();
++		Assert.assertTrue(validateTest(correct, new ServiceValidator()));
++		Assert.assertFalse(validateTest(incorrect, new ServiceValidator()));
++	}
++
++	@Test
++	public void testNumber() {
++		int correct = 5;
++		int incorrect = -5;
++		Assert.assertTrue(validateTest(correct, new NumberPositiveValidator()));
++		Assert.assertFalse(validateTest(incorrect, new NumberPositiveValidator()));
++	}
++
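++	// Returns true when the validator accepts the value, false when it throws a ValidationException.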
++	private boolean validateTest(Object value, PropertyValidator validator) {
++		try {
++			validator.validate(null, value);
++			return true;
++		} catch (ValidationException ex) {
++			return false;
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java
+new file mode 100755
+index 0000000..4fa2eda
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java
+@@ -0,0 +1,139 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import java.text.MessageFormat;
++import java.util.Arrays;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.settings.MessagingConfiguration;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class AnalysisProcessingTests extends ODFTestcase {
++	Logger logger = ODFTestLogger.get();
++
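++	// Stops the queue manager while a request is being processed, restarts it, and
++	// expects the request to still reach the FINISHED state afterwards.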
++	@Test
++	public void testAnalysisProcessingAfterShutdown() throws Exception {
++		final SettingsManager config = new ODFFactory().create().getSettingsManager();
++		final ODFSettings odfSettings = config.getODFSettings();
++		final MessagingConfiguration messagingConfiguration = odfSettings.getMessagingConfiguration();
++		final Long origRequestRetentionMs = messagingConfiguration.getAnalysisRequestRetentionMs();
++		messagingConfiguration.setAnalysisRequestRetentionMs(300000L);
++		config.updateODFSettings(odfSettings);
++
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
++		AnalysisRequest req = tracker.getRequest();
++		req.setDiscoveryServiceSequence(Arrays.asList("asynctestservice"));
++		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
++		final AnalysisResponse startRequest = cc.startRequest(req);
++		logger.info("Analysis :" + startRequest.getId());
++
++		Assert.assertNull(startRequest.getOriginalRequest());
++		Assert.assertFalse(startRequest.isInvalidRequest());
++		final AnalysisResponse duplicate = cc.startRequest(req);
++		Assert.assertNotNull(duplicate.getOriginalRequest());
++		Assert.assertEquals(startRequest.getId(), duplicate.getId());
++		logger.info("Analysis1 duplciate :" + duplicate.getId());
++
++		final AnalysisCancelResult cancelRequest = cc.cancelRequest(startRequest.getId());
++		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
++
++		cc.getQueueManager().stop();
++
++		AnalysisResponse response2 = cc.startRequest(req);
++		logger.info("Analysis2:" + response2.getId());
++		AnalysisRequestStatus requestStatus = cc.getRequestStatus(response2.getId());
++		int maxWait = 20;
++
++		int currentWait = 0;
++		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.ACTIVE) {
++			Thread.sleep(100);
++			currentWait++;
++			requestStatus = cc.getRequestStatus(response2.getId());
++		}
++		logger.info("THREAD ACTIVE, KILL IT!");
++
++		cc.getQueueManager().start();
++		logger.info("restarted");
++		Assert.assertNull(response2.getOriginalRequest());
++		Assert.assertFalse(response2.isInvalidRequest());
++
++		messagingConfiguration.setAnalysisRequestRetentionMs(origRequestRetentionMs);
++		config.updateODFSettings(odfSettings);
++
++		currentWait = 0;
++		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.FINISHED) {
++			Thread.sleep(100);
++			currentWait++;
++			requestStatus = cc.getRequestStatus(response2.getId());
++		}
++		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, requestStatus.getState());
++	}
++
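++	// Requesting a known annotation type must resolve to the single service that produces it.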
++	@Test
++	public void testRequestWithAnnotationTypes() throws Exception {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
++		AnalysisRequest req = tracker.getRequest();
++		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
++		List<String> annotationTypes = Arrays.asList(new String[] { "AsyncTestDummyAnnotation" });
++		req.setAnnotationTypes(annotationTypes);
++		logger.info(MessageFormat.format("Running discovery request for annotation type {0}.", annotationTypes));
++		AnalysisResponse resp = cc.startRequest(req);
++		logger.info(MessageFormat.format("Started request id {0}.", resp.getId()));
++		Assert.assertNotNull(resp.getId());
++		Assert.assertFalse(resp.isInvalidRequest());
++
++		int currentWait = 0;
++		int maxWait = 20;
++		AnalysisRequestStatus requestStatus = cc.getRequestStatus(resp.getId());
++		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.FINISHED) {
++			Thread.sleep(100);
++			currentWait++;
++			requestStatus = cc.getRequestStatus(resp.getId());
++		}
++		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, requestStatus.getState());
++		Assert.assertEquals("Generated service has incorrect number of elements.", 1, requestStatus.getRequest().getDiscoveryServiceSequence().size());
++		Assert.assertEquals("Generated service sequence differs from expected value.", "asynctestservice", requestStatus.getRequest().getDiscoveryServiceSequence().get(0));
++	}
++
++	@Test
++	public void testRequestWithMissingAnnotationTypes() throws Exception {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
++		AnalysisRequest req = tracker.getRequest();
++		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
++		List<String> annotationTypes = Arrays.asList(new String[] { "noServiceExistsForThisAnnotationType" });
++		req.setAnnotationTypes(annotationTypes);
++		logger.info(MessageFormat.format("Running discovery request for non-existing annotation type {0}.", annotationTypes));
++		AnalysisResponse resp = cc.startRequest(req);
++		Assert.assertTrue(resp.isInvalidRequest());
++		Assert.assertEquals("Unexpected error message.", "No suitable discovery services found to create the requested annotation types.", resp.getDetails());
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java
+new file mode 100755
+index 0000000..fd39e15
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java
+@@ -0,0 +1,104 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import java.util.Collections;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++
++public class AnalysisRequestCancellationTest extends ODFTestcase {
++
++	Logger logger = ODFTestLogger.get();
++
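++	// Builds a minimal tracker for the given request id with a single data set reference and the given status.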
++	AnalysisRequestTracker generateTracker(String id, STATUS status) {
++		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
++		Utils.setCurrentTimeAsLastModified(tracker);
++		tracker.setNextDiscoveryServiceRequest(0);
++		AnalysisRequest req = new AnalysisRequest();
++		req.setId(id);
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId("DataSet" + id);
++		req.setDataSets(Collections.singletonList(ref));
++		tracker.setRequest(req);
++		tracker.setStatus(status);
++		return tracker;
++	}
++
++	@Test
++	public void testRequestCancellationNotFoundFailure() {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisCancelResult cancelRequest = cc.cancelRequest("dummy_id");
++		Assert.assertEquals(AnalysisCancelResult.State.NOT_FOUND, cancelRequest.getState());
++	}
++
++	@Test
++	public void testRequestCancellationWrongStateFailure() {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
++		String testId = "test_id1";
++		AnalysisRequestTracker tracker = null;
++		AnalysisCancelResult cancelRequest = null;
++		
++		tracker = generateTracker(testId, STATUS.FINISHED);
++		store.store(tracker);
++		cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
++
++		tracker = generateTracker(testId, STATUS.ERROR);
++		store.store(tracker);
++		cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
++
++		tracker = generateTracker(testId, STATUS.CANCELLED);
++		store.store(tracker);
++		cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
++	}
++
++	@Test
++	public void testRequestCancellationSuccess() {
++		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
++		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
++		String testId = "test_id2";
++
++		AnalysisRequestTracker tracker = generateTracker(testId, STATUS.INITIALIZED);
++		store.store(tracker);
++		AnalysisCancelResult cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
++
++		tracker = generateTracker(testId, STATUS.IN_DISCOVERY_SERVICE_QUEUE);
++		store.store(tracker);
++		cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
++
++		tracker = generateTracker(testId, STATUS.DISCOVERY_SERVICE_RUNNING);
++		store.store(tracker);
++		cancelRequest = cc.cancelRequest(testId);
++		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java
+new file mode 100755
+index 0000000..7eb46d8
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java
+@@ -0,0 +1,105 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertNotNull;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++
++public class AnalysisRequestTrackerStoreTest extends ODFTestcase {
++
++	Logger logger = ODFTestLogger.get();
++
++	AnalysisRequestTracker generateTracker(String id, STATUS status) {
++		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
++		Utils.setCurrentTimeAsLastModified(tracker);
++		tracker.setNextDiscoveryServiceRequest(0);
++		AnalysisRequest req = new AnalysisRequest();
++		req.setId(id);
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId("DataSet" + id);
++		req.setDataSets(Collections.singletonList(ref));
++		tracker.setRequest(req);
++		tracker.setStatus(status);
++		return tracker;
++	}
++
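++	// Stores each tracker twice (the second time with a new status) and verifies that the
++	// store returns the latest status; the timings printed at the end are informational only.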
++	@Test
++	public void testStore() throws Exception {
++		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
++		assertNotNull(store);
++		int MAX_TRACKERS = 50;
++		List<AnalysisRequestTracker> trackers1 = new ArrayList<AnalysisRequestTracker>();
++		STATUS lastStatus = STATUS.IN_DISCOVERY_SERVICE_QUEUE;
++		for (int i = 0; i < MAX_TRACKERS; i++) {
++			trackers1.add(generateTracker("STORETEST_ID" + i, lastStatus));
++		}
++
++		logger.info("Storing " + MAX_TRACKERS + " Trackers");
++		long pass1Start = System.currentTimeMillis();
++		for (AnalysisRequestTracker tracker : trackers1) {
++			store.store(tracker);
++		}
++		long pass1End = System.currentTimeMillis();
++
++		logger.info("Storing " + MAX_TRACKERS + " Trackers again with new status");
++
++		lastStatus = STATUS.FINISHED;
++		List<AnalysisRequestTracker> trackers2 = new ArrayList<AnalysisRequestTracker>();
++		for (int i = 0; i < MAX_TRACKERS; i++) {
++			trackers2.add(generateTracker("STORETEST_ID" + i, lastStatus));
++		}
++		long pass2Start = System.currentTimeMillis();
++		for (AnalysisRequestTracker tracker : trackers2) {
++			store.store(tracker);
++		}
++		long pass2End = System.currentTimeMillis();
++
++		Thread.sleep(2000);
++		logger.info("Querying and checking " + MAX_TRACKERS + " Trackers");
++
++		long queryStart = System.currentTimeMillis();
++
++		for (int i = 0; i < MAX_TRACKERS; i++) {
++			final String analysisRequestId = "STORETEST_ID" + i;
++			AnalysisRequestTracker tracker = store.query(analysisRequestId);
++			assertNotNull(tracker);
++			assertEquals(1, tracker.getRequest().getDataSets().size());
++			MetaDataObjectReference ref = new MetaDataObjectReference();
++			ref.setId("DataSet" + analysisRequestId);
++			assertEquals(tracker.getRequest().getDataSets().get(0), ref);
++			assertEquals(lastStatus, tracker.getStatus());
++		}
++		long queryEnd = System.currentTimeMillis();
++
++		System.out.println("First pass: " + (pass1End - pass1Start) + "ms, second pass: " + (pass2End - pass2Start) + "ms, query: " + (queryEnd - queryStart) + "ms");
++
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java
+new file mode 100755
+index 0000000..347fb84
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java
+@@ -0,0 +1,158 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.controlcenter.DeclarativeRequestMapper;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++import org.junit.Assert;
++import org.junit.Test;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.controlcenter.ControlCenter;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++
++public class DeclarativeRequestMapperTest extends ODFTestBase {
++	final private static String SERVICE_CLASSNAME = "TestAsyncDiscoveryService1";
++	final private static String[] EXPECTED_SERVICE_SEQUENCES = new String[] { "pre3,ser1", "alt1,ser1", "pre4,pre1,ser1", 
++			"pre3,ser1,ser3", "pre3,ser1,ser5", "alt1,ser1,ser3", "alt1,ser1,ser5", "pre3,pre2,ser4", "alt1,pre2,ser4", 
++			"pre4,pre1,ser1,ser3", "pre4,pre1,ser1,ser5", "pre3,ser1,alt1,ser3", "pre3,ser1,pre2,ser4", "pre3,ser1,alt1,ser5" };
++	private Logger logger = Logger.getLogger(ControlCenter.class.getName());
++
++	private static void createDiscoveryService(String serviceId, String[] resultingAnnotationTypes, String[] prerequisiteAnnotationTypes, String[] supportedObjectTypes) throws ValidationException, JSONException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
++		DiscoveryServiceJavaEndpoint dse = new DiscoveryServiceJavaEndpoint();
++		dse.setClassName(SERVICE_CLASSNAME);
++		dsProperties.setEndpoint(JSONUtils.convert(dse, DiscoveryServiceEndpoint.class));
++		dsProperties.setId(serviceId);
++		dsProperties.setName(serviceId + " Discovery Service");
++		dsProperties.setPrerequisiteAnnotationTypes(Arrays.asList(prerequisiteAnnotationTypes));
++		dsProperties.setResultingAnnotationTypes(Arrays.asList(resultingAnnotationTypes));
++		dsProperties.setSupportedObjectTypes(Arrays.asList(supportedObjectTypes));
++		discoveryServicesManager.createDiscoveryService(dsProperties);
++	}
++
++	private void deleteDiscoveryService(String serviceId, boolean failOnError) throws ValidationException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		try {
++			discoveryServicesManager.deleteDiscoveryService(serviceId);
++		}
++		catch (ServiceNotFoundException e) {
++			if (failOnError) {
++				Assert.fail("Error deleting discovery services.");
++			}
++		}
++	}
++
++	private void deleteDiscoveryServices(boolean failOnError) throws ValidationException {
++		List<String> serviceIds = Arrays.asList(new String[] { "ser1", "ser2", "ser3", "ser4", "ser5", "pre1", "pre2", "pre3", "pre4", "alt1" });
++		for (String serviceId : serviceIds) {
++			deleteDiscoveryService(serviceId, failOnError);
++		}
++	}
++
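++	// Registers a small service graph: the ser* services produce the requested annotation types,
++	// while pre1-pre4 and alt1 provide their prerequisite annotations, so the mapper can
++	// enumerate alternative discovery service sequences.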
++	private void createDiscoveryServices() throws ValidationException, JSONException {
++		createDiscoveryService("ser1", new String[] { "an1", "com1", "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("ser2", new String[] { "an2", "com1"         }, new String[] { "pre2"         }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("ser3", new String[] {                "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("ser4", new String[] { "an1", "com1", "com2" }, new String[] { "pre1", "pre2" }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("ser5", new String[] {        "com1", "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
++
++		createDiscoveryService("pre1", new String[] { "pre1"                }, new String[] { "pre4"         }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("pre2", new String[] { "pre2"                }, new String[] {                }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("pre3", new String[] { "pre1"                }, new String[] {                }, new String[] { "Table", "DataFile" });
++		createDiscoveryService("pre4", new String[] { "pre4"                }, new String[] {                }, new String[] { "Table", "DataFile" });
++
++		createDiscoveryService("alt1", new String[] { "pre1"                }, new String[] {                }, new String[] { "Table", "DataFile" });
++	}
++
++	@Test
++	public void testDiscoveryServiceSequences() throws Exception {
++		deleteDiscoveryServices(false);
++		createDiscoveryServices();
++
++		AnalysisRequest request = new AnalysisRequest();
++		request.setAnnotationTypes(Arrays.asList( new String[] { "an1", "com2" }));
++		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++		logger.log(Level.INFO, "Printing list of mapper result to stdout.");
++		int i = 0;
++		for (DeclarativeRequestMapper.DiscoveryServiceSequence discoveryApproach : mapper.getDiscoveryServiceSequences()) {
++			String sequence = Utils.joinStrings(new ArrayList<String>(discoveryApproach.getServiceSequence()), ',');
++			System.out.println(sequence);
++			if (i < EXPECTED_SERVICE_SEQUENCES.length) {
++				Assert.assertTrue(sequence.equals(EXPECTED_SERVICE_SEQUENCES[i++]));
++			}
++		}
++		Assert.assertEquals("Number of calculated discovery service sequences does not match expected value.", 36, mapper.getDiscoveryServiceSequences().size());
++
++		deleteDiscoveryServices(true);
++	}
++
++	@Test
++	public void testRecommendedDiscoveryServiceSequence() throws Exception {
++		deleteDiscoveryServices(false);
++		createDiscoveryServices();
++
++		AnalysisRequest request = new AnalysisRequest();
++		request.setAnnotationTypes(Arrays.asList( new String[] { "com2", "pre4" }));
++		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++		Assert.assertEquals("Recommended sequence does not match expected string.", "pre4,pre1,ser1", Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
++
++		deleteDiscoveryServices(true);
++	}
++
++	@Test
++	public void testRemoveFailingService() throws Exception {
++		deleteDiscoveryServices(false);
++		createDiscoveryServices();
++
++		AnalysisRequest request = new AnalysisRequest();
++		request.setAnnotationTypes(Arrays.asList(new String[] { "an1", "com2" }));
++		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++		Assert.assertEquals("Original sequence does not match expected string.", EXPECTED_SERVICE_SEQUENCES[0], Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
++
++		mapper.removeDiscoveryServiceSequences("ser1");
++		Assert.assertEquals("Updated sequence does not match expected string.", "pre3,pre2,ser4", Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
++
++		deleteDiscoveryServices(true);
++	}
++
++	@Test
++	public void testRequestWithManyAnnotationTypes() throws Exception {
++		deleteDiscoveryServices(false);
++		createDiscoveryServices();
++
++		AnalysisRequest request = new AnalysisRequest();
++		request.setAnnotationTypes(Arrays.asList(new String[] {  "an1", "an2", "com1", "com2", "pre1", "pre2", "pre4" }));
++		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
++		Assert.assertEquals("Number of calculated discovery service sequences does not match expected value.", 75, mapper.getDiscoveryServiceSequences().size());
++
++		deleteDiscoveryServices(true);
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java
+new file mode 100755
+index 0000000..96a4fee
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java
+@@ -0,0 +1,172 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertTrue;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
++import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++
++public class DefaultThreadManagerTest extends TimerTestBase {
++
++	int threadMS = 100;
++	int waitMS = 5000;
++	
++	Logger logger = ODFTestLogger.get();
++
++	class TestRunnable implements ODFRunnable {
++
++		String id;
++		boolean cancelled = false;
++		long msToWaitBeforeFinish;
++		
++		public TestRunnable(String id, long msToWaitBeforeFinish) {
++			this.id = id;
++			this.msToWaitBeforeFinish = msToWaitBeforeFinish;
++		}
++		
++		public TestRunnable(String id) {
++			this(id, threadMS);
++		}
++
++		@Override
++		public void run() {
++			logger.info("Starting thread with ID: " + id);
++			try {
++				Thread.sleep(msToWaitBeforeFinish);
++			} catch (InterruptedException e) {
++				// Sleep was interrupted; print the trace and let the runnable finish.
++				e.printStackTrace();
++			}
++			logger.info("Thread finished with ID: " + id);
++
++		}
++
++		@Override
++		public void setExecutorService(ExecutorService service) {
++			// Not needed for this test runnable.
++		}
++
++		@Override
++		public void cancel() {
++			cancelled = true;
++		}
++
++		@Override
++		public boolean isReady() {
++			return true;
++		}
++
++	}
++
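++	// Starting a runnable under an id that is already RUNNING must not create a new thread;
++	// once the thread is FINISHED, the same id can be reused.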
++	@Test
++	public void testSimple() throws Exception {
++		ODFInternalFactory f = new ODFInternalFactory();
++		ThreadManager tm = f.create(ThreadManager.class);
++		tm.setExecutorService(f.create(ExecutorServiceFactory.class).createExecutorService());
++		assertNotNull(tm);
++
++		String id1 = "id1";
++		String id2 = "id2";
++
++		// start id1
++		ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id1);
++		Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
++
++		boolean b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
++		assertTrue(b);
++		b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
++		assertFalse(b);
++
++		st = tm.getStateOfUnmanagedThread(id1);
++		Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, st);
++
++		// start id2
++		st = tm.getStateOfUnmanagedThread(id2);
++		Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
++
++		b = tm.startUnmanagedThread(id2, new TestRunnable(id2)).isNewThreadCreated();
++		assertTrue(b);
++		b = tm.startUnmanagedThread(id2, new TestRunnable(id2)).isNewThreadCreated();
++		assertFalse(b);
++
++		Thread.sleep(waitMS);
++		st = tm.getStateOfUnmanagedThread(id1);
++		Assert.assertEquals(ThreadStatus.ThreadState.FINISHED, st);
++		b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
++		assertTrue(b);
++
++		st = tm.getStateOfUnmanagedThread(id2);
++		// id2 should be removed from thread list
++		Assert.assertTrue(ThreadStatus.ThreadState.FINISHED.equals(st) || ThreadStatus.ThreadState.NON_EXISTENT.equals(st));
++
++		tm.shutdownThreads(Arrays.asList("id1", "id2"));
++	}
++
++	@Test
++	public void testManyThreads() throws Exception {
++		ODFInternalFactory f = new ODFInternalFactory();
++		ThreadManager tm = f.create(ThreadManager.class);
++		tm.setExecutorService(f.create(ExecutorServiceFactory.class).createExecutorService());
++
++		assertNotNull(tm);
++
++		List<String> threadIds = new ArrayList<>();
++		int THREAD_NUM = 20;
++		for (int i = 0; i < THREAD_NUM; i++) {
++			String id = "ThreadID" + i;
++			threadIds.add(id);
++			ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id);
++			Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
++
++			boolean b = tm.startUnmanagedThread(id, new TestRunnable(id)).isNewThreadCreated();
++			assertTrue(b);
++			b = tm.startUnmanagedThread(id, new TestRunnable(id)).isNewThreadCreated();
++			assertFalse(b);
++
++			st = tm.getStateOfUnmanagedThread(id);
++			Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, st);
++
++		}
++		logger.info("All threads scheduled");
++
++		Thread.sleep(waitMS);
++
++		for (int i = 0; i < THREAD_NUM; i++) {
++			String id = "ThreadID" + i;
++			ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id);
++			Assert.assertEquals(ThreadStatus.ThreadState.FINISHED, st);
++		}
++		tm.shutdownThreads(threadIds);
++
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java
+new file mode 100755
+index 0000000..900c214
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java
+@@ -0,0 +1,373 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.logging.Level;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
++import org.apache.atlas.odf.core.metadata.DefaultMetadataStore;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Ignore;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++
++public class ODFAPITest extends ODFTestBase {
++
++	public static int WAIT_MS_BETWEEN_POLLING = 500;
++	public static int MAX_NUMBER_OF_POLLS = 500;
++	public static String DUMMY_SUCCESS_ID = "success";
++	public static String DUMMY_ERROR_ID = "error";
++
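++	// Polls the request status until it leaves ACTIVE/QUEUED/NOT_FOUND or the poll budget is
++	// exhausted, then verifies the tracker recorded the expected number of discovery service
++	// responses (-1 means one response per issued discovery service request).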
++	public static void runRequestAndCheckResult(String dataSetID, AnalysisRequestStatus.State expectedState, int expectedProcessedDiscoveryRequests) throws Exception{
++		runRequestAndCheckResult(Collections.singletonList(dataSetID), expectedState, expectedProcessedDiscoveryRequests);
++	}
++	
++	public static void runRequestAndCheckResult(List<String> dataSetIDs, AnalysisRequestStatus.State expectedState, int expectedProcessedDiscoveryRequests) throws Exception{
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		String id = runRequest(dataSetIDs, analysisManager);
++		log.info("Running request " + id + " on data sets: " + dataSetIDs);
++		AnalysisRequestStatus status = null;
++
++		int maxPolls = MAX_NUMBER_OF_POLLS;
++		do {
++			status = analysisManager.getAnalysisRequestStatus(id);
++			log.log(Level.INFO, "Poll {4} for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
++					expectedState, (MAX_NUMBER_OF_POLLS-maxPolls) });
++			maxPolls--;
++			Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++
++		log.log(Level.INFO, "Polling result after {0} polls for request id {1}: status: {2}", new Object[] {(MAX_NUMBER_OF_POLLS-maxPolls), id, status.getState()});
++		
++		Assert.assertTrue("Request did not reach a final state within the polling limit.", maxPolls > 0);
++		Assert.assertEquals(expectedState, status.getState());
++		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++		AnalysisRequestTracker tracker = store.query(id);
++		Assert.assertNotNull(tracker);
++		checkTracker(tracker, expectedProcessedDiscoveryRequests);
++		log.info("Status details: " + status.getDetails());
++	}
++
++	static void checkTracker(AnalysisRequestTracker tracker, int expectedProcessedDiscoveryRequests) {
++		if (expectedProcessedDiscoveryRequests == -1) {
++			expectedProcessedDiscoveryRequests = tracker.getDiscoveryServiceRequests().size(); 
++		}
++		Assert.assertEquals(expectedProcessedDiscoveryRequests, tracker.getDiscoveryServiceResponses().size());
++		
++	}
++
++	static String runRequest(String dataSetID, AnalysisManager analysisManager) throws Exception {
++		return runRequest(Collections.singletonList(dataSetID), analysisManager);
++	}
++
++	public static String runRequest(List<String> dataSetIDs, AnalysisManager analysisManager) throws Exception {
++		AnalysisRequest request = createAnalysisRequest(dataSetIDs);
++		log.info("Starting analysis");
++		AnalysisResponse response = analysisManager.runAnalysis(request);
++		Assert.assertNotNull(response);
++		Assert.assertFalse(response.isInvalidRequest());
++		String id = response.getId();
++		Assert.assertNotNull(id);
++		return id;
++	}
++
++	
++	@Test
++	public void testSimpleSuccess() throws Exception {
++		runRequestAndCheckResult("successID", AnalysisRequestStatus.State.FINISHED, -1);
++	}
++
++	public static void waitForRequest(String requestId, AnalysisManager analysisManager) {
++		waitForRequest(requestId, analysisManager, MAX_NUMBER_OF_POLLS);
++	}
++	
++	public static void waitForRequest(String requestId, AnalysisManager analysisManager, int maxPolls) {
++		AnalysisRequestStatus status = null;
++
++		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
++		do {
++			status = analysisManager.getAnalysisRequestStatus(requestId);
++			
++			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
++			maxPolls--;
++			try {
++				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++			} catch (InterruptedException e) {
++				e.printStackTrace();
++				throw new RuntimeException(e);
++			}
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++		if (maxPolls == 0) {
++			log.log(Level.INFO, "Request ''{0}'' did not finish within the polling limit, giving up", requestId);
++		}
++		log.log(Level.INFO, "Request ''{0}'' ended with state: ''{1}''", new Object[] { requestId, status.getState() });
++	}
++
++	public static boolean waitForRequest(String requestId, AnalysisManager analysisManager, int maxPolls, AnalysisRequestStatus.State expectedState) {
++		AnalysisRequestStatus status = null;
++
++		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
++		do {
++			status = analysisManager.getAnalysisRequestStatus(requestId);
++			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
++			maxPolls--;
++			try {
++				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++			} catch (InterruptedException e) {
++				e.printStackTrace();
++				throw new RuntimeException(e);
++			}
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++		if (maxPolls == 0) {
++			log.log(Level.INFO, "Request ''{0}'' did not finish within the polling limit, giving up", requestId);
++		}
++		return expectedState.equals(status.getState());
++	}
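++
++	// The polling loops above all share the same "still pending" condition. A
++	// small predicate like the following (a sketch, not used by the tests as
++	// committed) would keep that condition in one place:
++	static boolean isPending(AnalysisRequestStatus.State state) {
++		return state == AnalysisRequestStatus.State.ACTIVE
++				|| state == AnalysisRequestStatus.State.QUEUED
++				|| state == AnalysisRequestStatus.State.NOT_FOUND;
++	}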
++
++	
++	@Test
++	public void testSimpleSuccessDuplicate() throws Exception {
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		String id = runRequest("successID", analysisManager);
++		String secondId = runRequest("successID", analysisManager);
++		Assert.assertNotEquals(id, secondId);
++		// Wait for the duplicate-detection time span to expire, then verify that a new analysis is started
++		Thread.sleep(DefaultStatusQueueStore.IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS*2 + 5000);
++		String thirdId = runRequest("successID", analysisManager);
++		Assert.assertNotEquals(secondId, thirdId);
++		waitForRequest(id, analysisManager);
++		waitForRequest(thirdId, analysisManager);
++	}
++
++	@Test
++	public void testSimpleSuccessNoDuplicate() throws Exception {
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		String id = runRequest("successID", analysisManager);
++		String secondId = runRequest("successID2", analysisManager);
++		Assert.assertNotEquals(id, secondId);
++		waitForRequest(id, analysisManager);
++		waitForRequest(secondId, analysisManager);
++	}
++
++	@Test
++	public void testSimpleSuccessDuplicateSubset() throws Exception {
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		String id = runRequest(Arrays.asList("successID", "successID2", "successID3"), analysisManager);
++		String secondId = runRequest("successID2", analysisManager);
++		Assert.assertNotEquals(id, secondId);
++		Thread.sleep(DefaultStatusQueueStore.IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS + 5000);
++		String thirdId = runRequest("successID", analysisManager);
++		Assert.assertNotEquals(secondId, thirdId);
++		waitForRequest(id, analysisManager);
++		waitForRequest(thirdId, analysisManager);
++	}
++	
++	/**
++	 * This test depends on timing: an analysis that is no longer in state
++	 * INITIALIZED or IN_SERVICE_QUEUE cannot be cancelled, so if the analysis
++	 * is picked up too quickly, the cancel call fails and the test with it.
++	 *
++	 * Ignored for now because this race can occur in the build.
++	 */
++	@Test
++	@Ignore
++	public void testCancelRequest() throws Exception {
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		String id = runRequest(Arrays.asList("successID", "successID2", "successID3"), analysisManager);
++		AnalysisCancelResult cancelAnalysisRequest = analysisManager.cancelAnalysisRequest(id);
++		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelAnalysisRequest.getState());
++		String secondId = runRequest("successID2", analysisManager);
++		Assert.assertNotEquals(id, secondId);
++	}
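++
++	/**
++	 * A possible way to harden the cancel test above (an illustrative sketch
++	 * only, not wired into the build): keep retrying the cancel call while the
++	 * request may not have been picked up yet, instead of assuming that the
++	 * first attempt wins the race.
++	 */
++	@SuppressWarnings("unused")
++	private static boolean cancelWithRetry(AnalysisManager analysisManager, String requestId, int attempts) throws InterruptedException {
++		for (int i = 0; i < attempts; i++) {
++			if (analysisManager.cancelAnalysisRequest(requestId).getState() == AnalysisCancelResult.State.SUCCESS) {
++				return true;
++			}
++			Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++		}
++		return false;
++	}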
++
++	
++	@Test
++	public void testRequestsWithDataSetListSuccess() throws Exception {
++		runRequestAndCheckResult(Arrays.asList("success1", "success2", "success3"), AnalysisRequestStatus.State.FINISHED, 6);
++	}
++	
++	@Test
++	public void testRequestsWithDataSetListError() throws Exception {
++		runRequestAndCheckResult(Arrays.asList("success1", "error2", "success3"), AnalysisRequestStatus.State.ERROR, 3);
++	}
++
++	@Test
++	public void testSimpleFailure() throws Exception {
++		runRequestAndCheckResult("errorID", AnalysisRequestStatus.State.ERROR, 1);
++	}
++	
++	@Test 
++	public void testManyRequests()  throws Exception {
++		List<String> dataSets = new ArrayList<String>();
++		List<AnalysisRequestStatus.State> expectedStates = new ArrayList<AnalysisRequestStatus.State>();
++		int dataSetNum = 5;
++		for (int i=0; i<dataSetNum; i++) {
++			AnalysisRequestStatus.State expectedState = AnalysisRequestStatus.State.FINISHED;
++			String dataSet = "successdataSet" + i;
++			if (i % 3 == 0) {
++				// every third data set should fail
++				dataSet = "errorDataSet" + i;
++				expectedState = AnalysisRequestStatus.State.ERROR;
++			} 
++			dataSets.add(dataSet);
++			expectedStates.add(expectedState);
++		}
++		
++		runRequests(dataSets, expectedStates);
++	}
++
++	public void runRequests(List<String> dataSetIDs, List<AnalysisRequestStatus.State> expectedStates) throws Exception {
++		Assert.assertEquals(dataSetIDs.size(), expectedStates.size());
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++
++		Map<AnalysisRequest, AnalysisRequestStatus.State> request2ExpectedState = new HashMap<AnalysisRequest, AnalysisRequestStatus.State>();
++
++		for (int i = 0; i < dataSetIDs.size(); i++) {
++			String dataSetID = dataSetIDs.get(i);
++			AnalysisRequestStatus.State expectedState = expectedStates.get(i);
++
++			AnalysisRequest request = createAnalysisRequest(Collections.singletonList(dataSetID));
++
++			log.info("Starting analysis");
++			AnalysisResponse response = analysisManager.runAnalysis(request);
++			Assert.assertNotNull(response);
++			String id = response.getId();
++			Assert.assertFalse(response.isInvalidRequest());
++			Assert.assertNotNull(id);
++			request.setId(id);
++			request2ExpectedState.put(request, expectedState);
++		}
++
++		//		Set<AnalysisRequest> finishedRequests = new HashSet<AnalysisRequest>();
++		Map<AnalysisRequest, AnalysisRequestStatus> actualFinalStatePerRequest = new HashMap<AnalysisRequest, AnalysisRequestStatus>();
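++		// Two-level polling: up to maxPollPasses passes over all requests, with a
++		// short per-request poll inside each pass, so that a single slow request
++		// does not block status checks for the others.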
++		int maxPollPasses = 10;
++		for (int i = 0; i < maxPollPasses; i++) {
++			log.info("Polling all requests, pass " + i);
++			boolean allRequestsFinished = true;
++			for (Map.Entry<AnalysisRequest, AnalysisRequestStatus.State> entry : request2ExpectedState.entrySet()) {
++
++				AnalysisRequest request = entry.getKey();
++				String id = request.getId();
++				if (actualFinalStatePerRequest.containsKey(request)) {
++					log.log(Level.INFO, "Request with ID ''{0}'' already finished, skipping it", id);
++				} else {
++					allRequestsFinished = false;
++
++					AnalysisRequestStatus.State expectedState = entry.getValue();
++
++					AnalysisRequestStatus status = null;
++
++					int maxPollsPerRequest = 3;
++					do {
++						status = analysisManager.getAnalysisRequestStatus(id);
++						log.log(Level.INFO, "Poll request for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''",
++								new Object[] { id, status.getState(), status.getDetails(), expectedState });
++						maxPollsPerRequest--;
++						Thread.sleep(1000);
++					} while (maxPollsPerRequest > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++
++					if (maxPollsPerRequest > 0) {
++						// final state found
++						actualFinalStatePerRequest.put(request, status);
++						//				Assert.assertEquals(expectedState, status.getState());
++					}
++				}
++			}
++			if (allRequestsFinished) {
++				log.info("All requests finished");
++				break;
++			}
++		}
++		Assert.assertEquals(request2ExpectedState.size(), actualFinalStatePerRequest.size());
++		Assert.assertEquals(request2ExpectedState.keySet(), actualFinalStatePerRequest.keySet());
++		for (Map.Entry<AnalysisRequest, AnalysisRequestStatus> actual : actualFinalStatePerRequest.entrySet()) {
++			AnalysisRequest req = actual.getKey();
++			Assert.assertNotNull(req);
++			AnalysisRequestStatus.State expectedState = request2ExpectedState.get(req);
++			Assert.assertNotNull(expectedState);
++			AnalysisRequestStatus.State actualState = actual.getValue().getState();
++			Assert.assertNotNull(actualState);
++
++			log.log(Level.INFO, "Checking request ID ''{0}'', actual state: ''{1}'', expected state: ''{2}''", new Object[] { req.getId(), actualState, expectedState });
++			Assert.assertEquals(expectedState, actualState);
++		}
++	}
++
++	public static AnalysisRequest createAnalysisRequest(List<String> dataSetIDs) throws JSONException {
++		AnalysisRequest request = new AnalysisRequest();
++		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		if (!(mds instanceof DefaultMetadataStore)) {
++			throw new RuntimeException(MessageFormat.format("This test does not work with metadata store implementation \"{0}\"; it only works with the DefaultMetadataStore.", mds.getClass().getName()));
++		}
++		DefaultMetadataStore defaultMds = (DefaultMetadataStore) mds;
++		defaultMds.resetAllData();
++		for (String id : dataSetIDs) {
++			MetaDataObjectReference mdr = new MetaDataObjectReference();
++			mdr.setId(id);
++			dataSetRefs.add(mdr);
++			if (id.startsWith(DUMMY_SUCCESS_ID) || id.startsWith(DUMMY_ERROR_ID)) {
++				log.info("Creating dummy data set for reference: " + id);
++				DataSet ds = new UnknownDataSet();
++				ds.setReference(mdr);
++				defaultMds.createObject(ds);
++			}
++		}
++		defaultMds.commit();
++		request.setDataSets(dataSetRefs);
++		List<String> serviceIds = Arrays.asList(new String[]{"asynctestservice", "synctestservice"});
++		/* use a fixed list of services
++		List<DiscoveryServiceRegistrationInfo> registeredServices = new ODFFactory().create(ControlCenter.class).getConfig().getRegisteredServices();		
++		for(DiscoveryServiceRegistrationInfo service : registeredServices){
++			serviceIds.add(service.getId());
++		}
++		*/
++		request.setDiscoveryServiceSequence(serviceIds);
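++		// Note: the "aaa" and "jo" properties set below are round-tripped to the
++		// test discovery services, which assert on exactly these values (see
++		// TestAsyncDiscoveryService1.checkUserAndAdditionalProperties).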
++		Map<String, Object> additionalProps = new HashMap<String, Object>();
++		additionalProps.put("aaa", "bbb");
++		JSONObject jo = new JSONObject();
++		jo.put("p1", "v1");
++		jo.put("p2", "v2");
++		additionalProps.put("jo", jo);
++		request.setAdditionalProperties(additionalProps);
++		return request;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java
+new file mode 100755
+index 0000000..9aa3ba4
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java
+@@ -0,0 +1,101 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.engine.EngineManager;
++import org.apache.atlas.odf.api.engine.SystemHealth;
++import org.apache.atlas.odf.api.engine.SystemHealth.HealthStatus;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.analysis.AnalysisManagerImpl;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++
++public class ParallelODFTest extends ODFTestcase {
++	Logger log = ODFTestLogger.get();
++	
++	@Test
++	public void runDataSetsInParallelSuccess() throws Exception {
++		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "successID2" }), State.FINISHED);
++	}
++
++	@Test 
++	public void runDataSetsInParallelError() throws Exception {
++		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "errorID2" }), State.ERROR);
++	}
++
++	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, State expectedState) throws Exception {
++		log.info("Running data sets in parallel: " + dataSetIDs);
++		log.info("Expected state: " + expectedState);
++		AnalysisRequest req = ODFAPITest.createAnalysisRequest(dataSetIDs);
++		// Enable parallel processing because this is a parallel test
++		req.setProcessDataSetsSequentially(false);
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		EngineManager engineManager = new ODFFactory().create().getEngineManager();
++
++		SystemHealth healthCheckResult = engineManager.checkHealthStatus();
++		Assert.assertEquals(HealthStatus.OK, healthCheckResult.getStatus());
++		AnalysisResponse resp = analysisManager.runAnalysis(req);
++		log.info("Parallel requests started");
++
++		String id = resp.getId();
++		List<String> singleIds = Utils.splitString(id, AnalysisManagerImpl.COMPOUND_REQUEST_SEPARATOR);
++		List<String> singleDetails = Utils.splitString(resp.getDetails(), AnalysisManagerImpl.COMPOUND_REQUEST_SEPARATOR);
++		Assert.assertEquals(dataSetIDs.size(), singleIds.size());
++		Assert.assertEquals(dataSetIDs.size(), singleDetails.size());
++
++		AnalysisRequestStatus status = null;
++
++		// check that requests are processed in parallel: 
++		//   there must be a point in time where both requests are in status "active"
++		log.info("Polling for status of parallel request...");
++		boolean foundPointInTimeWhereBothRequestsAreActive = false;
++		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
++		do {
++			List<State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
++			for (String singleId : singleIds) {
++				allSingleStates.add(analysisManager.getAnalysisRequestStatus(singleId).getState());
++			}
++			if (Utils.containsOnly(allSingleStates, new State[] { State.ACTIVE })) {
++				foundPointInTimeWhereBothRequestsAreActive = true;
++			}
++
++			status = analysisManager.getAnalysisRequestStatus(id);
++			log.log(Level.INFO, "Poll request for parallel request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
++					expectedState });
++			log.info("States of single requests: " + singleIds + ": " + allSingleStates);
++			maxPolls--;
++			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
++		} while (maxPolls > 0 && (status.getState() == State.ACTIVE || status.getState() == State.QUEUED));
++
++		Assert.assertTrue("Parallel request did not reach a final state within the polling limit.", maxPolls > 0);
++		Assert.assertEquals(expectedState, status.getState());
++		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
++		log.info("Parallel request status details: " + status.getDetails());
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java
+new file mode 100755
+index 0000000..9a43b78
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java
+@@ -0,0 +1,66 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.controlcenter;
++
++import java.util.logging.Level;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++
++public class SetTrackerStatusTest extends ODFTestBase {
++
++	@Test
++	public void testSetTrackerStatus() throws Exception {
++		AnalysisManager am = new ODFFactory().create().getAnalysisManager();
++		AnalysisRequestTrackerStore arts = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++		String requestId = ODFAPITest.runRequest("successId", am);
++		Thread.sleep(1000);
++		long cutOffTimestamp = System.currentTimeMillis();		
++		String testMessage = "Message was set to error at " + cutOffTimestamp;
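++		// setStatusOfOldRequest forces requests older than the cut-off timestamp
++		// into the given state; the request started above qualifies because of
++		// the preceding sleep.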
++		arts.setStatusOfOldRequest(cutOffTimestamp, STATUS.ERROR, testMessage);
++		AnalysisRequestTracker tracker = arts.query(requestId);
++		Assert.assertEquals(STATUS.ERROR, tracker.getStatus());
++		Assert.assertEquals(testMessage, tracker.getStatusDetails());
++		
++		// wait until request is finished and state is set back to finished
++		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
++		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
++		AnalysisRequestStatus status = null;
++		do {
++			status = am.getAnalysisRequestStatus(requestId);
++			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
++			maxPolls--;
++			try {
++				Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
++			} catch (InterruptedException e) {
++				e.printStackTrace();
++			}
++		} while (maxPolls > 0 && (status.getState() != AnalysisRequestStatus.State.FINISHED) );
++		
++		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, am.getAnalysisRequestStatus(requestId).getState());
++		tracker = arts.query(requestId);
++		Assert.assertEquals(STATUS.FINISHED, tracker.getStatus());
++	}
++	
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java
+new file mode 100755
+index 0000000..0f1aa8f
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java
+@@ -0,0 +1,135 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.discoveryservice;
++
++import java.io.InputStream;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++import org.junit.Assert;
++import org.junit.Ignore;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++
++public class DiscoveryServiceManagerTest {
++	
++	private static final String ASYNCTESTWA_SERVICE_ID = "asynctestservice-with-annotations";
++
++	private static final String NEW_SERVICE_ID = "New_Service";
++	private static final String NEW_SERVICE_NAME = "Name of New Service";
++	private static final String NEW_SERVICE_DESCRIPTION = "Description of the New Service";
++	private static final String NEW_SERVICE_CLASSNAME = "TestAsyncDiscoveryService1";
++
++	private static final String UPDATED_SERVICE_DESCRIPTION = "Updated description of the New Service";
++	private static final String UPDATED_SERVICE_CLASSNAME = "TestSyncDiscoveryService1";
++	
++	private void registerDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		discoveryServicesManager.createDiscoveryService(dsProperties);
++	}
++	
++	private void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		discoveryServicesManager.replaceDiscoveryService(dsProperties);
++	}
++	
++	private void unregisterDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		discoveryServicesManager.deleteDiscoveryService(serviceId);
++	}
++		
++	@Test
++	public void testGetDiscoveryServiceProperties() throws ServiceNotFoundException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		DiscoveryServiceProperties dsProperties = discoveryServicesManager.getDiscoveryServiceProperties(ASYNCTESTWA_SERVICE_ID);
++		Assert.assertNotNull(dsProperties);
++	}
++	
++		
++	@Ignore @Test    // Ignoring testcase due to problem on Mac (issue #56)
++	public void testGetDiscoveryServiceStatus() throws ServiceNotFoundException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		DiscoveryServiceStatus dsStatus = discoveryServicesManager.getDiscoveryServiceStatus(ASYNCTESTWA_SERVICE_ID);
++		Assert.assertNotNull(dsStatus);
++	}
++	
++	@Test  // TODO: need to adjust as soon as runtime statistics are available
++	public void testGetDiscoveryServiceRuntimeStatistics() throws ServiceNotFoundException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		DiscoveryServiceRuntimeStatistics dsRuntimeStats = discoveryServicesManager.getDiscoveryServiceRuntimeStatistics(ASYNCTESTWA_SERVICE_ID);
++		Assert.assertNotNull(dsRuntimeStats);
++		long avgProcTime = dsRuntimeStats.getAverageProcessingTimePerItemInMillis();
++		Assert.assertEquals(0, avgProcTime);
++	}
++
++	@Test
++	public void testDeleteDiscoveryServiceRuntimeStatistics() throws ServiceNotFoundException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		discoveryServicesManager.deleteDiscoveryServiceRuntimeStatistics(ASYNCTESTWA_SERVICE_ID);
++	}
++
++	@Test
++	public void testGetDiscoveryServiceImage() throws ServiceNotFoundException {
++		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
++		InputStream is = discoveryServicesManager.getDiscoveryServiceImage(ASYNCTESTWA_SERVICE_ID);
++		Assert.assertNull(is);
++	}
++
++	@Test
++	public void testCreateUpdateDelete() throws ServiceNotFoundException, ValidationException, JSONException {
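++		// Full lifecycle: register a new service, replace its definition, then
++		// unregister it again.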
++		DiscoveryServiceJavaEndpoint dse = new DiscoveryServiceJavaEndpoint();
++		dse.setClassName(NEW_SERVICE_CLASSNAME);
++		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
++		dsProperties.setId(NEW_SERVICE_ID);
++		dsProperties.setName(NEW_SERVICE_NAME);
++		dsProperties.setDescription(NEW_SERVICE_DESCRIPTION);
++		dsProperties.setLink(null);
++		dsProperties.setPrerequisiteAnnotationTypes(null);
++		dsProperties.setResultingAnnotationTypes(null);
++		dsProperties.setSupportedObjectTypes(null);
++		dsProperties.setAssignedObjectTypes(null);
++		dsProperties.setAssignedObjectCandidates(null);
++		dsProperties.setEndpoint(JSONUtils.convert(dse, DiscoveryServiceEndpoint.class));
++		dsProperties.setParallelismCount(2);
++		registerDiscoveryService(dsProperties);
++
++		DiscoveryServiceJavaEndpoint dse2 = new DiscoveryServiceJavaEndpoint();
++		dse2.setClassName(UPDATED_SERVICE_CLASSNAME);
++		DiscoveryServiceProperties dsProperties2 = new DiscoveryServiceProperties();
++		dsProperties2.setId(NEW_SERVICE_ID);
++		dsProperties2.setName(NEW_SERVICE_NAME);
++		dsProperties2.setDescription(UPDATED_SERVICE_DESCRIPTION);
++		dsProperties2.setLink(null);
++		dsProperties2.setPrerequisiteAnnotationTypes(null);
++		dsProperties2.setResultingAnnotationTypes(null);
++		dsProperties2.setSupportedObjectTypes(null);
++		dsProperties2.setAssignedObjectTypes(null);
++		dsProperties2.setAssignedObjectCandidates(null);
++		dsProperties2.setEndpoint(JSONUtils.convert(dse2, DiscoveryServiceEndpoint.class));
++		dsProperties2.setParallelismCount(2);
++		replaceDiscoveryService(dsProperties2);
++
++		unregisterDiscoveryService(NEW_SERVICE_ID);
++	}
++	
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java
+new file mode 100755
+index 0000000..2ea85b7
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java
+@@ -0,0 +1,227 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.discoveryservice;
++
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.Map;
++import java.util.Set;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
++import org.junit.Assert;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class TestAsyncDiscoveryService1 extends DiscoveryServiceBase implements AsyncDiscoveryService {
++
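++	// Alternates the response between OK and TEMPORARILY_UNAVAILABLE on
++	// successive startAnalysis calls, presumably so that callers' retry
++	// handling gets exercised by the tests.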
++	static int unavailableCounter = 0;
++
++	static Logger logger = ODFTestLogger.get();
++
++	public static void checkUserAndAdditionalProperties(DiscoveryServiceRequest request) {
++		String user = request.getUser();
++		
++		String defaultUser = System.getProperty("user.name");
++		Assert.assertEquals(defaultUser, user);
++
++		Map<String, Object> additionalProperties = request.getAdditionalProperties();
++		logger.info("TestAsyncDiscoveryService1.startAnalysis additional properties: " + additionalProperties);
++		Assert.assertNotNull(additionalProperties);
++		
++		// check that environment entries are also available additional properties
++		Environment ev = new ODFInternalFactory().create(Environment.class);
++		String dsId = request.getDiscoveryServiceId();
++		Map<String, String> serviceEnvProps = ev.getPropertiesWithPrefix(dsId);
++		if (!serviceEnvProps.isEmpty()) {
++			Assert.assertTrue(!additionalProperties.isEmpty());
++			for (Map.Entry<String, String> serviceEnvProp : serviceEnvProps.entrySet()) {
++				String key = serviceEnvProp.getKey();
++				String val = serviceEnvProp.getValue();
++				logger.info("Found discoveryservice configuration parameter: " + key + " with value " + val);
++				Assert.assertTrue(key.startsWith(dsId));
++				Assert.assertTrue(additionalProperties.containsKey(key) );
++				Assert.assertEquals(val, additionalProperties.get(key));
++			}
++		}
++		
++		if (!additionalProperties.isEmpty()) {
++			Assert.assertTrue(additionalProperties.containsKey("aaa"));
++			Assert.assertTrue("bbb".equals(additionalProperties.get("aaa")));
++			Assert.assertTrue(additionalProperties.containsKey("jo"));
++			@SuppressWarnings("unchecked")
++			Map<String, Object> m = (Map<String, Object>) additionalProperties.get("jo");
++			Assert.assertTrue("v1".equals(m.get("p1")));
++			Assert.assertTrue("v2".equals(m.get("p2")));
++			/*
++			if (!additionalProperties.containsKey("aaa")) {
++				response.setCode(ResponseCode.UNKNOWN_ERROR);
++				response.setDetails("Additional property value 'aaa' doesn't exist");
++				return;
++			}
++			if (!"bbb".equals(additionalProperties.get("aaa"))) {
++				response.setCode(ResponseCode.UNKNOWN_ERROR);
++				response.setDetails("Additional properties 'aaa' has wrong value");
++				return;
++			}
++			if (!additionalProperties.containsKey("jo")) {
++				response.setCode(ResponseCode.UNKNOWN_ERROR);
++				response.setDetails("Additional property value 'jo' doesn't exist");
++				return;
++			}
++			Map m = (Map) additionalProperties.get("jo");
++			if (!"v1".equals(m.get("p1"))) {
++				response.setCode(ResponseCode.UNKNOWN_ERROR);
++				response.setDetails("Additional property value 'jo.p1' doesn't exist");
++				return;
++
++			}
++			if (!"v2".equals(m.get("p2"))) {
++				response.setCode(ResponseCode.UNKNOWN_ERROR);
++				response.setDetails("Additional property value 'jo.p2' doesn't exist");
++				return;
++			}
++			*/
++		}
++	}
++	
++	@Override
++	public DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request) {
++		try {
++			DiscoveryServiceResponse.ResponseCode code = DiscoveryServiceResponse.ResponseCode.TEMPORARILY_UNAVAILABLE;
++			String details = "Cannot answer right now";
++			if (unavailableCounter % 2 == 0) {
++				code = DiscoveryServiceResponse.ResponseCode.OK;
++				details = "Everything's peachy";
++			}
++			unavailableCounter++;
++			/*
++			if (unavailableCounter % 3 == 0) {
++				code = CODE.NOT_AUTHORIZED;
++				details = "You have no power here!";
++			}
++			*/
++			DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
++			response.setCode(code);
++			response.setDetails(details);
++			if (code == DiscoveryServiceResponse.ResponseCode.OK) {
++				String runid = "TestAsyncService1" + UUID.randomUUID().toString();
++				synchronized (lock) {
++					runIDsRunning.put(runid, 4); // return status "running" 4 times before finishing
++				}
++				response.setRunId(runid);
++				String dataSetId = request.getDataSetContainer().getDataSet().getReference().getId();
++				if (dataSetId.startsWith("error")) {
++					logger.info("TestAsync Discovery Service run " + runid + " will fail");
++					runIDsWithError.add(runid);
++				} else {
++					logger.info("TestAsync Discovery Service run " + runid + " will succeed");
++				}
++			}
++			logger.info("TestAsyncDiscoveryService1.startAnalysis returns: " + JSONUtils.lazyJSONSerializer(response));
++			checkUserAndAdditionalProperties(request);
++			/*
++			String user = request.getUser();
++			Assert.assertEquals(TestControlCenter.TEST_USER_ID, user);
++
++			Map<String, Object> additionalProperties = request.getAdditionalProperties();
++			logger.info("TestAsyncDiscoveryService1.startAnalysis additional properties: " + additionalProperties);
++			Assert.assertNotNull(additionalProperties);
++			if (!additionalProperties.isEmpty()) {
++				if (!additionalProperties.containsKey("aaa")) {
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Additional property value 'aaa' doesn't exist");
++					return response;
++				}
++				if (!"bbb".equals(additionalProperties.get("aaa"))) {
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Additional properties 'aaa' has wrong value");
++					return response;
++				}
++				if (!additionalProperties.containsKey("jo")) {
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Additional property value 'jo' doesn't exist");
++					return response;
++				}
++				Map m = (Map) additionalProperties.get("jo");
++				if (!"v1".equals(m.get("p1"))) {
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Additional property value 'jo.p1' doesn't exist");
++					return response;
++
++				}
++				if (!"v2".equals(m.get("p2"))) {
++					response.setCode(ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Additional property value 'jo.p2' doesn't exist");
++					return response;
++				}
++			}
++			*/
++			return response;
++		} catch (Throwable t) {
++			DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
++			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++			response.setDetails(Utils.getExceptionAsString(t));
++			return response;
++		}
++	}
++
++	static Object lock = new Object();
++	static Map<String, Integer> runIDsRunning = new HashMap<String, Integer>();
++	static Set<String> runIDsWithError = Collections.synchronizedSet(new HashSet<String>());
++
++	//	static Map<String, Integer> requestIDUnavailable = new HashMap<>();
++
++	@Override
++	public DiscoveryServiceAsyncRunStatus getStatus(String runId) {
++		String details = "Run like the wind";
++		DiscoveryServiceAsyncRunStatus.State state = DiscoveryServiceAsyncRunStatus.State.RUNNING;
++		synchronized (lock) {
++			Integer i = runIDsRunning.get(runId);
++			Assert.assertNotNull(i);
++			if (i.intValue() == 0) {
++				if (runIDsWithError.contains(runId)) {
++					state = DiscoveryServiceAsyncRunStatus.State.ERROR;
++					details = "This was a mistake";
++				} else {
++					state = DiscoveryServiceAsyncRunStatus.State.FINISHED;
++					details = "Finish him!";
++				}
++			} else {
++				runIDsRunning.put(runId, i - 1);
++			}
++		}
++
++		DiscoveryServiceAsyncRunStatus status = new DiscoveryServiceAsyncRunStatus();
++		status.setRunId(runId);
++		status.setDetails(details);
++		status.setState(state);
++		logger.info("TestAsyncDiscoveryService1.getStatus returns: " + JSONUtils.lazyJSONSerializer(status));
++
++		return status;
++	}
++
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java
+new file mode 100755
+index 0000000..bd2f1a6
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java
+@@ -0,0 +1,99 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.discoveryservice;
++
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.Map;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
++import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++public class TestAsyncDiscoveryServiceWritingAnnotations1 extends DiscoveryServiceBase implements AsyncDiscoveryService {
++
++	static Logger logger = ODFTestLogger.get();
++
++	static Map<String, MyThread> id2Thread = Collections.synchronizedMap(new HashMap<String, MyThread>());
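++	// Each run ID is backed by a worker thread; getStatus() below derives the
++	// asynchronous run state from the state of that thread.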
++
++	class MyThread extends Thread {
++
++		String errorMessage = null;
++		String correlationId;
++		MetaDataObjectReference dataSetRef;
++
++		public MyThread(MetaDataObjectReference dataSetRef, String correlationId) {
++			super();
++			this.dataSetRef = dataSetRef;
++			this.correlationId = correlationId;
++		}
++
++		@Override
++		public void run() {
++			this.errorMessage = TestSyncDiscoveryServiceWritingAnnotations1.createAnnotations(dataSetRef, correlationId, metadataStore, annotationStore);
++		}
++
++	}
++
++	@Override
++	public DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request) {
++		DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
++		MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
++
++		String newRunID = "RunId-" + this.getClass().getSimpleName() + "-" + UUID.randomUUID().toString();
++		MyThread t = new MyThread(dataSetRef, (String) request.getAdditionalProperties().get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID));
++		t.start();
++		id2Thread.put(newRunID, t);
++		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++		response.setRunId(newRunID);
++		response.setDetails("Thread started");
++		logger.info("Analysis writing annotations has started");
++
++		return response;
++	}
++
++	@Override
++	public DiscoveryServiceAsyncRunStatus getStatus(String runId) {
++		DiscoveryServiceAsyncRunStatus status = new DiscoveryServiceAsyncRunStatus();
++
++		MyThread t = id2Thread.get(runId);
++		status.setRunId(runId);
++		if (t == null) {
++			status.setState(DiscoveryServiceAsyncRunStatus.State.NOT_FOUND);
++		} else {
++			java.lang.Thread.State ts = t.getState();
++			if (!ts.equals(java.lang.Thread.State.TERMINATED)) {
++				status.setState(DiscoveryServiceAsyncRunStatus.State.RUNNING);
++			} else {
++				if (t.errorMessage != null) {
++					status.setState(DiscoveryServiceAsyncRunStatus.State.ERROR);
++					status.setDetails(t.errorMessage);
++				} else {
++					status.setState(DiscoveryServiceAsyncRunStatus.State.FINISHED);
++					status.setDetails("All went fine");
++				}
++			}
++		}
++		logger.info("Status of analysis with annotations: " + status.getState() + ", " + status.getDetails());
++		return status;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java
+new file mode 100755
+index 0000000..9ea92f3
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java
+@@ -0,0 +1,61 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.discoveryservice;
++
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++public class TestSyncDiscoveryService1 extends DiscoveryServiceBase implements SyncDiscoveryService {
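++	// Same alternating OK/TEMPORARILY_UNAVAILABLE pattern as in
++	// TestAsyncDiscoveryService1, here for the synchronous code path.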
++	static int unavailableCounter = 0;
++
++	Logger logger = ODFTestLogger.get();
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		try {
++			DiscoveryServiceResponse.ResponseCode code = DiscoveryServiceResponse.ResponseCode.TEMPORARILY_UNAVAILABLE;
++			String details = "Cannot answer right now synchronously";
++			if (unavailableCounter % 2 == 0) {
++				code = DiscoveryServiceResponse.ResponseCode.OK;
++				details = "Everything's peachy and synchronous";
++			}
++			unavailableCounter++;
++			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++			response.setDetails(details);
++			response.setCode(code);
++			if (code == DiscoveryServiceResponse.ResponseCode.OK) {
++				String dataSetId = request.getDataSetContainer().getDataSet().getReference().getId();
++				if (dataSetId.startsWith("error")) {
++					response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++					response.setDetails("Something went synchronously wrong!");
++				} else {
++					response.setDetails("All is synchronously fine!");
++				}
++				TestAsyncDiscoveryService1.checkUserAndAdditionalProperties(request);
++			}
++			logger.info(this.getClass().getSimpleName() + " service returned with code: " + response.getCode());
++			return response;
++		} catch (Throwable t) {
++			t.printStackTrace();
++			return null;
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java
+new file mode 100755
+index 0000000..62c7bf6
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java
+@@ -0,0 +1,156 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.discoveryservice;
++
++import java.util.HashSet;
++import java.util.Set;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++
++import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataSet;
++import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++
++public class TestSyncDiscoveryServiceWritingAnnotations1 extends DiscoveryServiceBase implements SyncDiscoveryService {
++
++	static Logger logger = Logger.getLogger(TestSyncDiscoveryServiceWritingAnnotations1.class.getName());
++
++	public static String checkMetaDataCache(DiscoveryServiceRequest request) {
++		logger.info("Checking metadata cache");
++		MetaDataObject mdo = request.getDataSetContainer().getDataSet();
++		MetaDataCache cache = request.getDataSetContainer().getMetaDataCache();
++		if (cache == null) {
++			return null;
++		}
++		CachedMetadataStore cacheReader = new CachedMetadataStore(cache);
++
++		if (mdo instanceof RelationalDataSet) {
++			logger.info("Checking metadata cache for columns...");
++			RelationalDataSet rds = (RelationalDataSet) mdo;
++			Set<MetaDataObjectReference> cachedColumns = new HashSet<>();
++			Set<MetaDataObjectReference> actualColumns = new HashSet<>();
++			for (MetaDataObject col : cacheReader.getColumns(rds)) {
++				cachedColumns.add(col.getReference());
++			}
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			for (MetaDataObject col : mds.getColumns(rds)) {
++				actualColumns.add(col.getReference());
++			}
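++			// Mutual containsAll amounts to set equality: the cache must contain
++			// exactly the columns known to the metadata store.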
++			Assert.assertTrue("Columns missing from metadata cache.", cachedColumns.containsAll(actualColumns));
++			Assert.assertTrue("Too many columns in metadata cache.", actualColumns.containsAll(cachedColumns));
++		}
++		return null;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		logger.info("Analysis started on sync test service with annotations ");
++		String errorMessage = createAnnotations( //
++				request.getDataSetContainer().getDataSet().getReference(), //
++				(String) request.getAdditionalProperties().get(REQUEST_PROPERTY_CORRELATION_ID), //
++				metadataStore, //
++				annotationStore);
++		if (errorMessage == null) {
++			errorMessage = checkMetaDataCache(request);
++		}
++		DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
++		if (errorMessage == null) {
++			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++			resp.setDetails("Annotations created successfully");
++		} else {
++			resp.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++			resp.setDetails(errorMessage);
++		}
++		logger.info("Analysis finished on sync test service with annotations ");
++
++		return resp;
++	}
++
++	public static final String REQUEST_PROPERTY_CORRELATION_ID = "REQUEST_PROPERTY_CORRELATION_ID";
++
++	static final String ANNOTATION_TYPE = "AnnotationType-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
++	static final String JSON_ATTRIBUTE = "Attribute-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
++	static final String JSON_VALUE = "Value-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
++
++	public static int getNumberOfAnnotations() {
++		return 3;
++	}
++
++	public static String[] getPropsOfNthAnnotation(int i) {
++		return new String[] { ANNOTATION_TYPE + i, JSON_ATTRIBUTE + i, JSON_VALUE + i };
++	}
++
++	public static String createAnnotations(MetaDataObjectReference dataSetRef, String correlationId, MetadataStore mds, AnnotationStore as) {
++		try {
++			TestSyncDiscoveryServiceWritingAnnotations1.logger.info("Analysis will run on data set ref: " + dataSetRef);
++			MetaDataObject dataSet = mds.retrieve(dataSetRef);
++
++			String errorMessage = null;
++			if (dataSet == null) {
++				errorMessage = "Data set with id " + dataSetRef + " could not be retrieved";
++				TestSyncDiscoveryServiceWritingAnnotations1.logger.severe(errorMessage);
++				return errorMessage;
++			}
++
++			if (!(dataSet instanceof DataSet)) {
++				errorMessage = "Object with id " + dataSetRef + " is not a data set";
++				TestSyncDiscoveryServiceWritingAnnotations1.logger.severe(errorMessage);
++				return errorMessage;
++			}
++
++			// add some annotations
++			for (int i = 0; i < getNumberOfAnnotations(); i++) {
++				String[] annotValues = getPropsOfNthAnnotation(i);
++				ProfilingAnnotation annotation1 = new ProfilingAnnotation();
++				annotation1.setProfiledObject(dataSetRef);
++				annotation1.setAnnotationType(annotValues[0]);
++				JSONObject jo1 = new JSONObject();
++				jo1.put(annotValues[1], annotValues[2]);
++				jo1.put(REQUEST_PROPERTY_CORRELATION_ID, correlationId);
++				annotation1.setJsonProperties(jo1.write());
++
++// PG: dynamic type creation disabled (types are already created statically)
++//				mds.createAnnotationTypesFromPrototypes(Collections.singletonList(annotation1));
++				MetaDataObjectReference resultRef1 = as.store(annotation1);
++				if (resultRef1 == null) {
++					throw new RuntimeException("Annotation object " + i + " could not be created");
++				}
++			}
++
++			TestSyncDiscoveryServiceWritingAnnotations1.logger.info("Discovery service " + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName() + " created annotations successfully");
++		} catch (Throwable exc) {
++			exc.printStackTrace();
++			TestSyncDiscoveryServiceWritingAnnotations1.logger.log(Level.WARNING, TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName() + " has failed", exc);
++			return "Failed: " + Utils.getExceptionAsString(exc);
++		}
++		return null;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java
+new file mode 100755
+index 0000000..2e6d012
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java
+@@ -0,0 +1,30 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.engine;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.engine.ODFVersion;
++import org.apache.atlas.odf.core.test.TimerTestBase;
++
++public class ODFVersionTest extends TimerTestBase {
++	@Test
++	public void testVersion() {
++		ODFVersion version = new ODFFactory().create().getEngineManager().getVersion();
++		Assert.assertNotNull(version);
++		Assert.assertTrue(version.getVersion().startsWith("1.2.0-"));
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java
+new file mode 100755
+index 0000000..465eb5c
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java
+@@ -0,0 +1,90 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.engine;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.engine.EngineManager;
++import org.apache.atlas.odf.api.engine.ODFEngineOptions;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
++
++public class ShutdownTest extends ODFTestBase {
++
++	private void runAndTestThreads() throws Exception {
++		ODFAPITest.runRequestAndCheckResult("successID", State.FINISHED, -1);
++		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
++		int numThreads = tm.getNumberOfRunningThreads();
++		log.info("--- Number of running threads: " + numThreads);
++		Assert.assertTrue(numThreads >= 3);		
++	}
++
++	@Test
++	public void testShutdown() throws Exception {
++
++		log.info("--- Running some request before shutdown...");
++		runAndTestThreads();
++
++		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
++		log.info("--- Number of threads before shutdown: " + tm.getNumberOfRunningThreads());
++
++		EngineManager engineManager = new ODFFactory().create().getEngineManager();
++		ODFEngineOptions options = new ODFEngineOptions();
++		options.setRestart(false);
++		int numThreads = tm.getNumberOfRunningThreads();
++		log.info("--- Number of threads before restart: " + numThreads);
++
++		engineManager.shutdown(options);
++		log.info("--- Shutdown requested...");
++		int maxWait = 60;
++		int waitCnt = 0;
++		log.info("--- Shutdown requested, waiting for max " + maxWait + " seconds");
++		while (tm.getNumberOfRunningThreads() > 0 && waitCnt < maxWait) {
++			waitCnt++;
++			Thread.sleep(1000);
++		}
++		log.info("--- Shutdown should be done by now, waited for " + waitCnt + " threads: " + tm.getNumberOfRunningThreads());
++		Assert.assertNotEquals(waitCnt, maxWait);
++
++	//	log.info("--- Starting ODF again....");
++
++	//	ODFInitializer.start();
++		log.info("--- Rerunning request after shutdown...");
++		runAndTestThreads();
++
++		int nrOfThreads = tm.getNumberOfRunningThreads();
++		options.setRestart(true);
++		engineManager.shutdown(options);
++		maxWait = nrOfThreads * 2;
++		waitCnt = 0;
++		log.info("--- Restart requested..., wait for a maximum of " + (nrOfThreads * 2500) + " ms");
++		while (tm.getNumberOfRunningThreads() > 0 && waitCnt < maxWait) {
++			waitCnt++;
++			Thread.sleep(1000);
++		}
++		log.info("--- Restart should be done by now");
++		Thread.sleep(5000);
++		numThreads = tm.getNumberOfRunningThreads();
++		log.info("--- Number of threads after restart: " + numThreads);
++		Assert.assertTrue(numThreads > 2);
++		log.info("--- testShutdown finished");
++
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java
+new file mode 100755
+index 0000000..c2be180
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java
+@@ -0,0 +1,249 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.engine.MessagingStatus;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage;
++import org.apache.atlas.odf.core.controlcenter.AdminQueueProcessor;
++import org.apache.atlas.odf.core.controlcenter.ConfigChangeQueueProcessor;
++import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
++import org.apache.atlas.odf.core.controlcenter.DiscoveryServiceStarter;
++import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
++import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
++import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class MockQueueManager implements DiscoveryServiceQueueManager {
++
++	static Logger logger = Logger.getLogger(MockQueueManager.class.getName());
++
++	static Object lock = new Object();
++
++	static List<AdminMessage> adminQueue = Collections.synchronizedList(new ArrayList<AdminMessage>());
++	static List<StatusQueueEntry> statusQueue = Collections.synchronizedList(new ArrayList<StatusQueueEntry>());
++	static Map<String, List<AnalysisRequestTracker>> runtimeQueues = new HashMap<>();
++
++	ThreadManager threadManager;
++
++	public MockQueueManager() {
++		ODFInternalFactory factory = new ODFInternalFactory();
++		ExecutorServiceFactory esf = factory.create(ExecutorServiceFactory.class);
++		threadManager = factory.create(ThreadManager.class);
++		threadManager.setExecutorService(esf.createExecutorService());
++		//initialize();
++	}
++
++	@Override
++	public void start() throws TimeoutException {
++		logger.info("Initializing MockQueueManager");
++		List<ThreadStartupResult> threads = new ArrayList<ThreadStartupResult>();
++		ThreadStartupResult startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKADMIN", createQueueListener("Admin", adminQueue, new AdminQueueProcessor(), false));
++		boolean threadCreated = startUnmanagedThread.isNewThreadCreated();
++		threads.add(startUnmanagedThread);
++		startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKADMINCONFIGCHANGE",
++				createQueueListener("AdminConfig", adminQueue, new ConfigChangeQueueProcessor(), false));
++		threadCreated |= startUnmanagedThread.isNewThreadCreated();
++		threads.add(startUnmanagedThread);
++		startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKSTATUSSTORE",
++				createQueueListener("StatusStore", statusQueue, new DefaultStatusQueueStore.StatusQueueProcessor(), true));
++		threadCreated |= startUnmanagedThread
++				.isNewThreadCreated();
++		threads.add(startUnmanagedThread);
++
++		logger.info("New thread created: " + threadCreated);
++		if (threadCreated) {
++			try {
++				this.threadManager.waitForThreadsToBeReady(5000, threads);
++				logger.info("All threads ready");
++			} catch (TimeoutException e) {
++				final String message = "Not all thrads were created on time";
++				logger.warning(message);
++			}
++		}
++	}
++
++	@Override
++	public void stop() {
++		threadManager.shutdownThreads(Arrays.asList("MOCKADMIN", "MOCKADMINCONFIGCHANGE", "MOCKSTATUSSTORE"));
++	}
++
++	<T> T cloneObject(T obj) {
++		try {
++			return JSONUtils.cloneJSONObject(obj);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	@Override
++	public void enqueue(AnalysisRequestTracker tracker) {
++		tracker = cloneObject(tracker);
++		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++		if (dsRequest == null) {
++			throw new RuntimeException("Tracker is finished, should not be enqueued");
++		}
++		String dsID = dsRequest.getDiscoveryServiceId();
++		dsRequest.setPutOnRequestQueue(System.currentTimeMillis());
++		synchronized (lock) {
++			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(dsID);
++			if (runtime == null) {
++				throw new RuntimeException(MessageFormat.format("Runtime of discovery service ''{0}'' does not exist", dsID));
++			}
++			String runtimeName = runtime.getName();
++			List<AnalysisRequestTracker> mq = runtimeQueues.get(runtimeName);
++			if (mq == null) {
++				mq = Collections.synchronizedList(new ArrayList<AnalysisRequestTracker>());
++				runtimeQueues.put(runtimeName, mq);
++			}
++			boolean started = this.threadManager.startUnmanagedThread("MOCK" + runtimeName, createQueueListener("Starter" + runtimeName, mq, new DiscoveryServiceStarter(), false))
++					.isNewThreadCreated();
++			logger.info("New thread created for runtime " + runtimeName + ", started: " + started + ", current queue length: " + mq.size());
++			mq.add(tracker);
++		}
++	}
++
++	static class MockQueueListener implements ODFRunnable {
++		String name; 
++		QueueMessageProcessor processor;
++		List<?> queue;
++		boolean cancelled = false;
++		ExecutorService service;
++		int index = 0;
++
++		public MockQueueListener(String name, List<?> q, QueueMessageProcessor qmp, boolean fromBeginning) {
++			this.name = name;
++			this.processor = qmp;
++			this.queue = q;
++			if (fromBeginning) {
++				index = 0;
++			} else {
++				index = q.size();
++			}
++		}
++
++		long WAITTIMEMS = 100;
++
++		boolean isValidIndex() {
++			return index >= 0 && index < queue.size();
++		}
++
++		@Override
++		public void run() {
++			logger.info("MockQueueManager thread " + name + " started");
++
++			while (!cancelled) {
++			//	logger.info("Queue consumer " + name + ": checking index " + index + " on queue of size " + queue.size());
++				if (!isValidIndex()) {
++					try {
++						Thread.sleep(WAITTIMEMS);
++					} catch (InterruptedException e) {
++						e.printStackTrace();
++					}
++				} else {
++					Object obj = queue.get(index);
++					String msg;
++					try {
++						msg = JSONUtils.toJSON(obj);
++					} catch (JSONException e) {
++						e.printStackTrace();
++						cancelled = true;
++						return;
++					}
++					this.processor.process(service, msg, 0, index);
++					logger.finest("MockQConsumer " + name + ": Processed message: " + msg);
++					index++;
++				}
++			}
++			logger.info("MockQueueManager thread finished");
++
++		}
++
++
++		@Override
++		public void setExecutorService(ExecutorService service) {
++			this.service = service;
++		}
++
++		@Override
++		public void cancel() {
++			cancelled = true;
++		}
++
++		@Override
++		public boolean isReady() {
++			return true;
++		}
++
++	}
++
++	ODFRunnable createQueueListener(String name, List<?> queue, QueueMessageProcessor qmp, boolean fromBeginning) {
++		return new MockQueueListener(name, queue, qmp, fromBeginning);
++	}
++
++	@Override
++	public void enqueueInStatusQueue(StatusQueueEntry sqe) {
++		sqe = cloneObject(sqe);
++		statusQueue.add(sqe);
++	}
++
++	@Override
++	public void enqueueInAdminQueue(AdminMessage message) {
++		message = cloneObject(message);
++		adminQueue.add(message);
++	}
++
++	public static class MockMessagingStatus extends MessagingStatus {
++		String message;
++
++		public String getMessage() {
++			return message;
++		}
++
++		public void setMessage(String message) {
++			this.message = message;
++		}
++
++	}
++
++	@Override
++	public MessagingStatus getMessagingStatus() {
++		MockMessagingStatus mms = new MockMessagingStatus();
++		mms.setMessage("OK");
++		return mms;
++	}
++
++}
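A short usage sketch (not part of the patch): tests resolve this mock through `ODFInternalFactory` (see the `odf-implementation.properties` file later in this patch). The default `StatusQueueEntry` constructor is an assumption:

	// Resolve the queue manager (the mock on the test classpath) and push a status entry.
	DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
	qm.start();                                    // spins up the MOCK* listener threads; declares TimeoutException
	StatusQueueEntry sqe = new StatusQueueEntry(); // assumed default constructor
	qm.enqueueInStatusQueue(sqe);                  // entry is cloned and appended to the in-memory status queue
	qm.stop();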
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java
+new file mode 100755
+index 0000000..f69513c
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java
+@@ -0,0 +1,72 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.notification;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.notification.NotificationListener;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.notification.NotificationManager;
++import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
++
++public class NotificationManagerTest extends ODFTestBase {
++
++	@Test
++	public void testNotifications() throws Exception {
++		NotificationManager nm = new ODFInternalFactory().create(NotificationManager.class);
++		Assert.assertNotNull(nm);
++		log.info("Notification manager found " + nm.getClass().getName());
++		Assert.assertTrue(nm instanceof TestNotificationManager);
++		List<NotificationListener> listeners = nm.getListeners();
++		Assert.assertTrue(listeners.size() > 0);
++
++		OpenDiscoveryFramework odf = new ODFFactory().create();
++		List<String> dataSetIDs = Collections.singletonList("successID");
++		String id = ODFAPITest.runRequest(dataSetIDs, odf.getAnalysisManager());
++		ODFAPITest.waitForRequest(id, odf.getAnalysisManager());
++
++		int polls = 20;
++		boolean found = false;
++		boolean foundFinished = false;
++		do {
++			// now check that trackers were found through the notification mechanism
++			log.info("Checking that trackers were consumed, " + polls + " seconds left");
++			List<AnalysisRequestTracker> trackers = new ArrayList<>(TestNotificationManager.receivedTrackers);
++			log.info("Received trackers: " + trackers.size());
++			for (AnalysisRequestTracker tracker : trackers) {
++				String foundId = tracker.getRequest().getId();
++				if (foundId.equals(id)) {
++					found = true;
++					if (tracker.getStatus().equals(STATUS.FINISHED)) {
++						foundFinished = true;
++					}
++				}
++			}
++			polls--;
++			Thread.sleep(1000);
++		} while (!found && !foundFinished && polls > 0);
++		Assert.assertTrue(found);
++		Assert.assertTrue(foundFinished);
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java
+new file mode 100755
+index 0000000..80252d6
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java
+@@ -0,0 +1,66 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.notification;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
++import org.apache.atlas.odf.core.notification.NotificationListener;
++import org.apache.atlas.odf.core.notification.NotificationManager;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++public class TestNotificationManager implements NotificationManager {
++
++	public static class TestListener1 implements NotificationListener {
++
++		@Override
++		public String getTopicName() {
++			return "odf-status-topic";
++		}
++
++		@Override
++		public void onEvent(String event, OpenDiscoveryFramework odf) {
++			try {
++				StatusQueueEntry sqe = JSONUtils.fromJSON(event, StatusQueueEntry.class);
++				AnalysisRequestTracker tracker = sqe.getAnalysisRequestTracker();
++				if (tracker != null) {
++					receivedTrackers.add(tracker);					
++				}
++			} catch (JSONException e) {
++				throw new RuntimeException(e);
++			}
++		}
++
++		@Override
++		public String getName() {
++			return this.getClass().getName();
++		}
++
++	}
++
++	public static List<AnalysisRequestTracker> receivedTrackers = Collections.synchronizedList(new ArrayList<AnalysisRequestTracker>());
++
++	@Override
++	public List<NotificationListener> getListeners() {
++		List<NotificationListener> result = new ArrayList<>();
++		result.add(new TestListener1());
++		return result;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java
+new file mode 100755
+index 0000000..8a8d9a8
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java
+@@ -0,0 +1,114 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.runtime;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
++
++public class RuntimeExtensionTest extends ODFTestBase {
++
++	static final String SERVICE_ON_TEST_RUNTIME = "testruntimeservice";
++
++	List<String> getNames(List<ServiceRuntime> rts) {
++		List<String> result = new ArrayList<>();
++		for (ServiceRuntime rt : rts) {
++			result.add(rt.getName());
++		}
++		return result;
++	}
++
++	@Test
++	public void testActiveRuntimes() {
++		List<String> allNames = getNames(ServiceRuntimes.getAllRuntimes());
++		Assert.assertTrue(allNames.contains(TestServiceRuntime.TESTSERVICERUNTIME_NAME));
++
++		List<String> activeNames = getNames(ServiceRuntimes.getActiveRuntimes());
++		Assert.assertTrue(activeNames.contains(TestServiceRuntime.TESTSERVICERUNTIME_NAME));
++	}
++
++	@Test
++	public void testRuntimeForNewService() {
++		ServiceRuntime rt = ServiceRuntimes.getRuntimeForDiscoveryService(SERVICE_ON_TEST_RUNTIME);
++		Assert.assertNotNull(rt);
++		Assert.assertEquals(TestServiceRuntime.TESTSERVICERUNTIME_NAME, rt.getName());
++	}
++
++	static Object lock = new Object();
++
++	@Test
++	public void testRuntimeExtensionSimple() throws Exception {
++		synchronized (lock) {
++			OpenDiscoveryFramework odf = new ODFFactory().create();
++			TestServiceRuntime.runtimeBlocked = false;
++			AnalysisRequest request = ODFAPITest.createAnalysisRequest(Collections.singletonList(ODFAPITest.DUMMY_SUCCESS_ID));
++			request.setDiscoveryServiceSequence(Collections.singletonList(SERVICE_ON_TEST_RUNTIME));
++			log.info("Starting service for test runtime");
++			AnalysisResponse resp = odf.getAnalysisManager().runAnalysis(request);
++			String requestId = resp.getId();
++			Assert.assertTrue(ODFAPITest.waitForRequest(requestId, odf.getAnalysisManager(), 40, State.FINISHED));
++			Assert.assertTrue(TestServiceRuntime.requests.contains(requestId));
++			log.info("testRuntimeExtensionSimple finished");
++
++			// block runtime again to restore state before testcase
++			TestServiceRuntime.runtimeBlocked = true;
++			Thread.sleep(5000);
++		}
++	}
++
++	@Test
++	public void testBlockedRuntimeExtension() throws Exception {
++		synchronized (lock) {
++			OpenDiscoveryFramework odf = new ODFFactory().create();
++			TestServiceRuntime.runtimeBlocked = true;
++			AnalysisRequest request = ODFAPITest.createAnalysisRequest(Collections.singletonList(ODFAPITest.DUMMY_SUCCESS_ID));
++			request.setDiscoveryServiceSequence(Collections.singletonList(SERVICE_ON_TEST_RUNTIME));
++			log.info("Starting service for test runtime");
++			AnalysisResponse resp = odf.getAnalysisManager().runAnalysis(request);
++			String requestId = resp.getId();
++			Assert.assertFalse(resp.isInvalidRequest());
++			log.info("Checking that service is not called");
++			for (int i = 0; i < 5; i++) {
++				Assert.assertFalse(TestServiceRuntime.requests.contains(requestId));
++				Thread.sleep(1000);
++			}
++			log.info("Unblocking runtime...");
++			TestServiceRuntime.runtimeBlocked = false;
++			Thread.sleep(5000); // give service time to start consumption
++			log.info("Checking that request has finished");
++			Assert.assertTrue(ODFAPITest.waitForRequest(requestId, odf.getAnalysisManager(), 40, State.FINISHED));
++			log.info("Checking that service was called");
++			Assert.assertTrue(TestServiceRuntime.requests.contains(requestId));
++			log.info("testBlockedRuntimeExtension finished");
++			
++			// block runtime again to restore state before testcase
++			TestServiceRuntime.runtimeBlocked = true;
++			Thread.sleep(5000);
++		}
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java
+new file mode 100755
+index 0000000..d16e10a
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java
+@@ -0,0 +1,80 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.runtime;
++
++import java.util.HashSet;
++import java.util.Set;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++
++public class TestServiceRuntime implements ServiceRuntime {
++
++	static Logger logger = ODFTestLogger.get();
++
++	public static final String TESTSERVICERUNTIME_NAME = "TestServiceRuntime";
++	
++	public static boolean runtimeBlocked = true;
++
++	@Override
++	public String getName() {
++		return TESTSERVICERUNTIME_NAME;
++	}
++
++	@Override
++	public long getWaitTimeUntilAvailable() {
++		if (runtimeBlocked) {
++			return 1000;
++		}
++		return 0;
++	}
++
++	public static Set<String> requests = new HashSet<>();
++
++	public static class DSProxy extends SyncDiscoveryServiceBase {
++
++		@Override
++		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++			logger.info("Running test runtime service");
++			requests.add(request.getOdfRequestId());
++			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
++			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++			resp.setDetails("Test success");
++			return resp;
++		}
++	}
++
++	@Override
++	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
++		return new DSProxy();
++	}
++
++	@Override
++	public String getDescription() {
++		return "TestServiceRuntime description";
++	}
++
++	@Override
++	public void validate(DiscoveryServiceProperties props) throws ValidationException {
++	}
++
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java
+new file mode 100755
+index 0000000..30848bd
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java
+@@ -0,0 +1,59 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.spark;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++public class MockSparkServiceExecutor implements SparkServiceExecutor {
++	Logger logger = Logger.getLogger(MockSparkServiceExecutor.class.getName());
++
++	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer) {
++		DataSetCheckResult checkResult = new DataSetCheckResult();
++		checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
++		return checkResult;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request) {
++		logger.log(Level.INFO, "Starting Spark mock application.");
++		DiscoveryServiceSparkEndpoint sparkEndpoint;
++		try {
++			sparkEndpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++		if (sparkEndpoint.getJar() == null) {
++			throw new RuntimeException("Spark application is not set in Spark endpoint.");
++		}
++		logger.log(Level.INFO, "Application name is {0}.", sparkEndpoint.getJar());
++		logger.log(Level.INFO, "Spark application finished.");
++		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++		response.setDetails("Discovery service completed successfully.");
++		return response;
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java
+new file mode 100755
+index 0000000..661cfe2
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java
+@@ -0,0 +1,91 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.spark;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.logging.Level;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.core.metadata.DefaultMetadataStore;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++
++public class SimpleSparkDiscoveryServiceTest extends ODFTestBase {
++
++	public static int WAIT_MS_BETWEEN_POLLING = 500;
++	public static int MAX_NUMBER_OF_POLLS = 500;
++	
++	@Test
++	public void testSparkService() throws Exception{
++		log.info("Running request ");
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++		AnalysisRequest request = new AnalysisRequest();
++		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
++		MetadataStore mds = new ODFFactory().create().getMetadataStore();
++		if (!(mds instanceof DefaultMetadataStore)) {
++			throw new RuntimeException(MessageFormat.format("This tests does not work with metadata store implementation \"{0}\" but only with the DefaultMetadataStore.", mds.getClass().getName()));
++		}
++		DefaultMetadataStore defaultMds = (DefaultMetadataStore) mds;
++		defaultMds.resetAllData();
++		RelationalDataSet dataSet = new DataFile();
++		MetaDataObjectReference ref = new MetaDataObjectReference();
++		ref.setId("datafile-mock");
++		dataSet.setReference(ref);
++		defaultMds.createObject(dataSet);
++		defaultMds.commit();
++		dataSetRefs.add(dataSet.getReference());
++		request.setDataSets(dataSetRefs);
++		List<String> serviceIds = Arrays.asList(new String[]{"spark-service-test"});
++		request.setDiscoveryServiceSequence(serviceIds);
++
++		log.info("Starting analyis");
++		AnalysisResponse response = analysisManager.runAnalysis(request);
++		Assert.assertNotNull(response);
++		String requestId = response.getId();
++		Assert.assertNotNull(requestId);
++		log.info("Request id is " + requestId + ".");
++
++		log.info("Waiting for request to finish");
++		AnalysisRequestStatus status = null;
++		int maxPolls = MAX_NUMBER_OF_POLLS;
++		do {
++			status = analysisManager.getAnalysisRequestStatus(requestId);
++			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
++			maxPolls--;
++			try {
++				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
++			} catch (InterruptedException e) {
++				log.log(Level.INFO, "Exception thrown: ", e);
++			}
++		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
++		if (maxPolls == 0) {
++			log.log(Level.INFO, "Request ''{0}'' is not finished yet, don't wait for it", requestId);
++		}
++		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, status.getState());
++		log.log(Level.INFO, "Request ''{0}'' is finished.", requestId);
++	}
++}
+diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java
+new file mode 100755
+index 0000000..191d337
+--- /dev/null
++++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java
+@@ -0,0 +1,80 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.store;
++
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class MockConfigurationStorage implements ODFConfigurationStorage {
++
++	static JSONObject config;
++
++	static {
++		try {
++			config = new JSONObject(MockConfigurationStorage.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json"));
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	@Override
++	public void storeConfig(ConfigContainer container) {
++		try {
++			config = JSONUtils.toJSONObject(container);
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	@Override
++	public ConfigContainer getConfig(ConfigContainer defaultConfig) {
++		try {
++			return JSONUtils.fromJSON(config.write(), ConfigContainer.class);
++		} catch (Exception e) {
++			throw new RuntimeException(e);
++		}
++	}
++
++	@Override
++	public void onConfigChange(ConfigContainer container) {
++		// do nothing
++	}
++
++	@Override
++	public void addPendingConfigChange(String changeId) {
++		// do nothing
++	}
++
++	@Override
++	public void removePendingConfigChange(String changeId) {
++		// do nothing
++	}
++
++	@Override
++	public boolean isConfigChangePending(String changeId) {
++		return false;
++	}
++
++}
+diff --git a/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt b/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt
+new file mode 100755
+index 0000000..25eb233
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt
+@@ -0,0 +1 @@
++TestServiceRuntime
+\ No newline at end of file
+diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json
+new file mode 100755
+index 0000000..34dbf78
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json
+@@ -0,0 +1,111 @@
++{  
++   "prop1":"mystring",
++   "prop2":999,
++   "prop3":999.999,
++   "obj1":{  
++      "prop1":"mystring",
++      "prop2":999,
++      "prop3":999.999
++   },
++   "arr1":[  
++      {  
++         "prop1":"mystring",
++         "prop2":999,
++         "prop3":999.999
++      }
++   ],
++   "obj2":{  
++      "prop1":"mystring",
++      "prop2":999,
++      "prop3":999.999,
++      "nobj21":{  
++         "prop1":"mystring",
++         "prop2":999,
++         "prop3":999.999,
++         "nnarr211":[  
++            {  
++               "prop1":"mystring",
++               "prop2":999,
++               "prop3":999.999
++            }
++         ]
++      },
++      "narr21":[  
++         {  
++            "prop1":"mystring",
++            "prop2":999,
++            "prop3":999.999,
++            "nnarr211":[  
++               {  
++                  "prop1":"mystring",
++                  "prop2":999,
++                  "prop3":999.999
++               }
++            ]
++         }
++      ]
++   },
++   "obj3":{  
++      "prop1":"mystring",
++      "prop2":999,
++      "prop3":999.999,
++      "nobj31":{  
++         "prop1":"mystring",
++         "prop2":999,
++         "prop3":999.999,
++         "nnobj31":{  
++            "prop1":"mystring",
++            "prop2":999,
++            "prop3":999.999
++         }
++      },
++      "narr31":[  
++         {  
++            "prop1":"mystring",
++            "prop2":999,
++            "prop3":999.999,
++            "nnarr311":[  
++               {  
++                  "prop1":"mystring",
++                  "prop2":999,
++                  "prop3":999.999,
++                  "nnnarr3111":[  
++                     {  
++                        "prop1":"mystring",
++                        "prop2":999,
++                        "prop3":999.999
++                     }
++                  ]
++               }
++            ]
++         }
++      ]
++   },
++   "obj4":{  
++      "prop1":"mystring",
++      "prop2":999,
++      "prop3":999.999,
++      "nobj41":{  
++         "prop1":"mystring",
++         "prop2":999,
++         "prop3":999.999,
++         "nobj411":{  
++            "prop1":"mystring",
++            "prop2":999,
++            "prop3":999.999,
++            "nnnarr4111":[  
++               {  
++                  "prop1":"mystring",
++                  "prop2":999,
++                  "prop3":999.999,
++                  "nnobj41111":{  
++                     "prop1":"mystring",
++                     "prop2":999,
++                     "prop3":999.999
++                  }
++               }
++            ]
++         }
++      }
++   }
++}
+diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json
+new file mode 100755
+index 0000000..146748d
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json
+@@ -0,0 +1,8 @@
++{
++    "javaClass": "aHopefullyUnknownClass",
++	"profiledObject": null,
++	"annotationType": "MySubType",
++	"analysisRun": "bla",
++	"newProp1": "newProp1Value",
++	"newProp2": 4237
++}
+diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json
+new file mode 100755
+index 0000000..9757e51
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json
+@@ -0,0 +1,114 @@
++{
++	"odf" : {
++		"instanceId" : "odf-default-id-CHANGEME",
++		"odfUrl" : "https://localhost:58081/odf-web-1.2.0-SNAPSHOT",
++		"odfUser" : "odf",
++		"odfPassword" : "ZzTeX3hKtVORgks+2TaLPWxerucPBoxK",
++		"runNewServicesOnRegistration": false,
++		"runAnalysisOnImport": false,
++		"reuseRequests": true,
++		"discoveryServiceWatcherWaitMs": 2000,
++		"enableAnnotationPropagation": true,
++		"messagingConfiguration": {
++			"type": "com.ibm.iis.odf.api.settings.KafkaMessagingConfiguration",
++			"analysisRequestRetentionMs": 86400000,
++			"queueConsumerWaitMs": 2000,
++			"kafkaBrokerTopicReplication": 1,
++			"kafkaConsumerConfig": {
++				"offsetsStorage": "kafka",
++				"zookeeperSessionTimeoutMs": 400,
++				"zookeeperConnectionTimeoutMs": 6000
++			}
++		},
++		"userDefined": {
++		}
++	},
++	"registeredServices": [{
++			"id": "asynctestservice",
++			"name": "Async test",
++			"description": "The async test service",
++			"resultingAnnotationTypes": [
++				"AsyncTestDummyAnnotation"
++			],
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.discoveryservice.TestAsyncDiscoveryService1"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "asynctestservice-with-annotations",
++			"name": "Async test including metadata access",
++			"description": "The async test service writing annotations",
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.discoveryservice.TestAsyncDiscoveryServiceWritingAnnotations1"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "synctestservice",
++			"name": "Sync test",
++			"description": "The Sync test service",
++			"resultingAnnotationTypes": [
++				"SyncTestDummyAnnotation"
++			],
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.discoveryservice.TestSyncDiscoveryService1"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "synctestservice-with-annotations",
++			"name": "Sync test with annotations",
++			"description": "The Sync test service writing annotations",
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "synctestservice-with-extendedannotations",
++			"name": "Sync test with extended annotations",
++			"description": "The Sync test service writing annotations with extension mechanism",
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "synctestservice-with-json-annotations",
++			"name": "Sync test with json annotations",
++			"description": "The Sync test service writing annotations returned from a json file",
++			"endpoint": {
++				"runtimeName": "Java",
++				"className": "com.ibm.iis.odf.core.test.annotation.TestSyncDiscoveryServiceWritingJsonAnnotations"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "spark-service-test",
++			"name": "Simple Spark mock test",
++			"description": "The Spark test is calling a mock version of the SparkAppExecutor",
++			"endpoint": {
++				"runtimeName": "Spark",
++				"inputMethod": "DataFrame",
++				"jar": "my-example-application-jar",
++				"className": "my-example-class-name"
++			},
++			"parallelismCount" : 2
++		},
++		{
++			"id": "testruntimeservice",
++			"name": "Runtime test service",
++			"description": "Runtime test service description",
++			"endpoint": {
++				"runtimeName": "TestServiceRuntime"
++			}
++		}
++		
++	]
++}
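For reference, a minimal sketch (not part of the patch) of how this JSON becomes a typed configuration object; both calls mirror what `MockConfigurationStorage` does earlier in this patch:

	// Load the initial configuration resource and deserialize it into a ConfigContainer.
	JSONObject json = new JSONObject(getClass().getClassLoader().getResourceAsStream(
			"org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json"));
	ConfigContainer config = JSONUtils.fromJSON(json.write(), ConfigContainer.class);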
+diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json
+new file mode 100755
+index 0000000..b884aca
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json
+@@ -0,0 +1,31 @@
++{
++	"user": "isadmin",
++	"lastModified": "1443795291000",
++	"discoveryServiceRequests": [{
++		"dataSetContainer": {
++			"dataSet": {
++				"javaClass": "com.ibm.iis.odf.core.metadata.models.Document",
++				"name": "someDocument",
++				"reference": {
++					"id": "testdataset"
++				}
++			}
++		},
++		"discoveryServiceId": "testservice"
++	}],
++	"nextDiscoveryServiceRequest": 1,
++	"request": {
++		"dataSets": [{
++			"id": "testdataset"
++		}],
++		"id": "testid"
++	},
++	"status": "FINISHED",
++	"statusDetails": "All discovery services run successfully",
++	"discoveryServiceResponses": [{
++		"type": "async",
++		"runId": "IARUNID6f49fdfd-89ce-4d46-9067-b3a4db4698ba",
++		"details": "IA has run successfully",
++		"code": "OK"
++	}]
++}
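A minimal sketch (not part of the patch) of how this fixture is typically consumed; the calls appear verbatim in the tests earlier in this patch, and the comment on the final line is an inference from the enqueue check in `MockQueueManager`:

	// Deserialize the tracker fixture and inspect the request it carries.
	AnalysisRequestTracker tracker = JSONUtils.fromJSON(trackerJson, AnalysisRequestTracker.class);
	String requestId = tracker.getRequest().getId(); // "testid"
	// Likely null here: the tracker status is FINISHED, so there is no next service request.
	DiscoveryServiceRequest next = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);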
+diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..18109c4
+--- /dev/null
++++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,20 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++## USE for TESTs only
++
++
++ODFConfigurationStorage=MockConfigurationStorage
++DiscoveryServiceQueueManager=MockQueueManager
++SparkServiceExecutor=MockSparkServiceExecutor
++NotificationManager=TestNotificationManager
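Each entry maps an ODF interface to the test implementation that `ODFInternalFactory` should hand out. A usage sketch (not part of the patch), following the pattern used throughout the tests above:

	// With this properties file on the test classpath, each interface resolves to its mock.
	ODFInternalFactory factory = new ODFInternalFactory();
	DiscoveryServiceQueueManager qm = factory.create(DiscoveryServiceQueueManager.class); // MockQueueManager
	NotificationManager nm = factory.create(NotificationManager.class);                   // TestNotificationManager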
+diff --git a/odf/odf-doc/.gitignore b/odf/odf-doc/.gitignore
+new file mode 100755
+index 0000000..32d60cc
+--- /dev/null
++++ b/odf/odf-doc/.gitignore
+@@ -0,0 +1,6 @@
++target
++.settings
++.classpath
++.project
++.factorypath
++.DS_Store
+diff --git a/odf/odf-doc/README.txt b/odf/odf-doc/README.txt
+new file mode 100755
+index 0000000..80dbb61
+--- /dev/null
++++ b/odf/odf-doc/README.txt
+@@ -0,0 +1,3 @@
++The documentation project is based on the Maven Site Plugin and Maven Doxia. The resulting war file is merged into the war file of the sdp-web project using the overlay mechanism of the Maven War Plugin. The documentation is then available through the getting started page of the SDP web console.
++
++Edit the src/site/markdown/*.md files in order to update the documentation. The structure of the web site can be changed in file src/site/site.xml.
+diff --git a/odf/odf-doc/pom.xml b/odf/odf-doc/pom.xml
+new file mode 100755
+index 0000000..6ebffcf
+--- /dev/null
++++ b/odf/odf-doc/pom.xml
+@@ -0,0 +1,163 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-doc</artifactId>
++	<packaging>war</packaging>
++	<dependencies>
++		<dependency>
++			<groupId>javax.ws.rs</groupId>
++			<artifactId>jsr311-api</artifactId>
++			<version>1.1.1</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<!-- The following dependencies are required by Spark Discovery Services only and are provided by the Spark cluster -->
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-core_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-sql_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++	</dependencies>
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-war-plugin</artifactId>
++				<version>2.6</version>
++				<configuration>
++					<webResources>
++						<resource>
++							<directory>${project.build.directory}/site</directory>
++							<targetPath>/doc</targetPath>
++						</resource>
++					</webResources>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-javadoc-plugin</artifactId>
++				<version>2.10.3</version>
++				<configuration>
++					<sourcepath>${basedir}/../odf-api/src/main/java</sourcepath>
++					<outputDirectory>${project.build.directory}/doc</outputDirectory>
++					<excludePackageNames>org.apache.atlas.odf.core.metadata.atlas:org.apache.atlas.odf.core.metadata.importer:org.apache.atlas.odf.core.metadata.internal:org.apache.atlas.odf.json</excludePackageNames>
++				</configuration>
++				<executions>
++					<execution>
++						<id>generate-javadocs</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>javadoc</goal>
++						</goals>
++ 					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-site-plugin</artifactId>
++				<version>3.3</version>
++				<configuration>
++					<port>9000</port>
++					<tempWebappDirectory>${basedir}/target/site/tempdir</tempWebappDirectory>
++					<generateProjectInfo>false</generateProjectInfo>
++					<generateReports>false</generateReports>
++					<inputEncoding>UTF-8</inputEncoding>
++					<outputEncoding>UTF-8</outputEncoding>
++				</configuration>
++				<executions>
++					<execution>
++						<id>generate-html</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>site</goal>
++						</goals>
++					</execution>
++				</executions>
++				<dependencies>
++					<dependency>
++						<groupId>org.apache.maven.doxia</groupId>
++						<artifactId>doxia-module-markdown</artifactId>
++						<version>1.3</version>
++					</dependency>
++				</dependencies>
++			</plugin>
++			<!--  this section compiles the tutorial project to check if the code is valid.
++			 -->
++			 <!--
++			<plugin>
++				<artifactId>maven-invoker-plugin</artifactId>
++				<version>2.0.0</version>
++				<configuration>
++					<projectsDirectory>src/site/resources/tutorial-projects</projectsDirectory>
++					<cloneProjectsTo>${project.build.directory}/tutorial-projects-build</cloneProjectsTo>
++				</configuration>
++				<executions>
++					<execution>
++						<id>compile-tutorial-projects</id>
++						<goals>
++							<goal>run</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++			 -->
++		</plugins>
++	</build>
++	<profiles>
++		<profile>
++			<!--  Turn off additional checks for maven-javadoc-plugin that will cause build errors when using Java 8 -->
++			<!--  See http://stackoverflow.com/questions/22528767/how-to-work-around-the-stricter-java-8-javadoc-when-using-maven -->
++			<id>disable-java8-doclint</id>
++			<activation>
++				<jdk>[1.8,)</jdk>
++			</activation>
++			<properties>
++				<additionalparam>-Xdoclint:none</additionalparam>
++			</properties>
++		</profile>
++	</profiles>
++</project>
+diff --git a/odf/odf-doc/src/main/webapp/WEB-INF/web.xml b/odf/odf-doc/src/main/webapp/WEB-INF/web.xml
+new file mode 100755
+index 0000000..97fb61b
+--- /dev/null
++++ b/odf/odf-doc/src/main/webapp/WEB-INF/web.xml
+@@ -0,0 +1,21 @@
++<!--
++~ (C) Copyright IBM Corp. 2017
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<!DOCTYPE web-app PUBLIC
++ "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
++ "http://java.sun.com/dtd/web-app_2_3.dtd" >
++<web-app>
++  <display-name>odf-doc</display-name>
++</web-app>
+diff --git a/odf/odf-doc/src/site/markdown/api-reference.md b/odf/odf-doc/src/site/markdown/api-reference.md
+new file mode 100755
+index 0000000..4c25a53
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/api-reference.md
+@@ -0,0 +1,5 @@
++# API reference
++
++[General ODF API reference](../swagger).
++
++[Java Docs for ODF services](./apidocs/index.html)
+diff --git a/odf/odf-doc/src/site/markdown/build.md b/odf/odf-doc/src/site/markdown/build.md
+new file mode 100755
+index 0000000..70e82df
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/build.md
+@@ -0,0 +1,99 @@
++# Build
++
++This page describes how to build ODF.  
++
++## Prerequisites
++
++You need git, Maven, and Python 2.7 (not 3!) available on the command line.
++If you run these commands and you see similar output you should be all set:
++
++	$ mvn -v
++	Apache Maven 3.3.9 (bb52d8502b132ec0a5a3f4c09453c07478323dc5; 2015-11-10T17:41:47+01:00)
++	...
++
++	$ python -V
++	Python 2.7.10
++
++	$ git --version
++	git version 2.7.4
++
++The build currently requires the [IA data quality project][3] (https://github.com/YSAILLET/dataquality) to be available. This can be achieved in two ways:
++1. Make sure that you can reach the [IIS Maven Repository][1]. If this doesn't work, try to authenticate against the Littleton firewall, e.g., by opening the [RTC dashboard][2].
++2. Build the [IA data quality project][3] on your local machine, typically with these commands
++
++	git clone https://github.ibm.com/YSAILLET/dataquality
++	cd dataquality
++	mvn clean install
++
++
++### Additional Prerequisites on Windows
++
++- For the build: The directory C:\tmp needs to exist
++- For the tests and the test environment to run properly: The `HADOOP_HOME` environment variable must be set to a location where the Hadoop [winutils.exe](http://public-repo-1.hortonworks.com/hdp-win-alpha/winutils.exe) file is available in the `bin` folder. For example, if the environment variable is set to `HADOOP_HOME=c:\hadoop`, the file needs to be available at `c:\hadoop\bin\winutils.exe`.
++- In your Maven install directory go to bin and copy mvn.cmd to mvn.bat
++
++## Building
++
++To build, clone the repository and perform a Maven build in the top-level directory. These commands should do the trick:
++
++	git clone https://github.com/Analytics/open-discovery-framework.git
++	cd open-discovery-framework
++	mvn clean install
++
++Add the `-Dreduced-build` option to build and test only the core components and services of ODF:
++
++	mvn clean install -Dreduced-build
++
++## Fast build without tests or with reduced tests
++
++To build without running tests, run Maven with the following options (the second one prevents the test Atlas instance from being started and stopped):
++
++	mvn clean install -DskipTests -Duse.running.atlas
++
++Use the `-Dreduced-tests` option to run only a reduced set of tests:
++
++	mvn clean install -Dreduced-tests
++
++This will skip all integration tests (i.e., all tests that involve Atlas) and also some of the long-running tests. The option may be combined with the `-Dreduced-build` option introduced above.
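++
++For example, a fast build that runs only the reduced test set on the reduced set of components combines both options:
++
++	mvn clean install -Dreduced-build -Dreduced-tests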
++
++## Building the test environment
++
++You can build a test environment that contains Atlas, Kafka,
++and Jetty by running these commands:
++
++	cd odf-test-env
++	mvn package
++
++This will create a zip file with the standalone test environment under
++``odf-test-env/target/odf-test-env-0.1.0-SNAPSHOT-bin.zip``.
++See the contents of this zip file or the [documentation section on the test environment](test-env.html)
++for details.
++
++Congrats! You have just built ODF.
++This should be enough to get you going. See below for additional information
++on different aspects of the build.
++
++## Additional Information
++
++### Working with Eclipse
++
++To build with Eclipse you must have the maven m2e plugin and EGit installed (e.g., search for "m2e maven integration for eclipse" and "egit", respectively, on the Eclipse marketplace).
++
++- Clone the repository into some directory as above, e.g., /home/code/odf.
++- Open Eclipse with a workspace in a different directory.
++- Go to File -> Import -> Maven -> Existing Maven projects.
++- Enter /home/code/odf as the root directory.
++- Select all projects and click Finish.
++- Internally, Eclipse will now perform Maven builds but you can work with the code as usual.
++
++If you want to build via Run configurations be aware that this will not work with the embedded
++maven provided by the m2e plugin. Instead you will have to do this:
++
++- Open Windows -> Preferences -> Maven -> Installations
++- Add a new installation pointing to your external Maven installation
++- For each run configuration you use, select the new installation in the Maven runtime dropdown
++(you might also have to set JAVA_HOME in the environment tab).
++
++  [1]: http://iis-repo.swg.usma.ibm.com:8080/archiva/repository/all/
++  [2]: https://ips-rtc.swg.usma.ibm.com/jazz/web/projects
++  [3]: https://github.ibm.com/YSAILLET/dataquality
+diff --git a/odf/odf-doc/src/site/markdown/configuration.md b/odf/odf-doc/src/site/markdown/configuration.md
+new file mode 100755
+index 0000000..a025a48
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/configuration.md
+@@ -0,0 +1 @@
++# Configuration
+diff --git a/odf/odf-doc/src/site/markdown/data-model.md b/odf/odf-doc/src/site/markdown/data-model.md
+new file mode 100755
+index 0000000..4bd389e
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/data-model.md
+@@ -0,0 +1,106 @@
++# Data Model
++
++This section describes the basic data model of how results of discovery services are
++stored and how new discovery services can extend and enrich this model.
++
++See the section [ODF Metadata API](odf-metadata-api.html) for general information
++on how to retrieve metadata.
++
++You can find the current Atlas data model in the file
++
++	odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
++
++which contains JSON that can be POSTed to the Atlas `types` REST resource to create those types.
++
++## Annotations
++
++All discovery services results are called "annotations". An annotation is an object
++that annotates another object with a certain piece of information.
++For instance, you could have a `DataClassAnnotation` with a reference attribute `classifiedObject` linking it to
++a column `NAME` of a table `CUSTREC`, and a list of reference attributes `classifyingObjects` linking it to business terms.
++An additional attribute `confidences` might provide a list of numeric confidence values indicating the "strength" of the
++relationship between the classifiedObject and the respective classifyingObject (business term).
++For column `NAME`, the list of classifying objects may have a single entry `Customer Name`, and the list of confidence values
++may have a single value `0.7`.
++This annotation expresses the fact that the term classification services registered and active in ODF have come up with a
++70% confidence of CUSTREC.NAME representing a customer name.
++
++Technically, an annotation is a subtype of one of the three *base types* which are subtypes of the (abstract)
++Atlas type `Annotation`:
++
++- `ProfilingAnnotation`
++- `ClassificationAnnotation`
++- `RelationshipAnnotation`
++
++
++A `ProfilingAnnotation` assigns non-reference attributes to an object. It has the following non-reference attributes:
++
++- `annotationType`: The type of annotation. A JSON string of the form
++   `{"stdType": "DataQualityAnnotation", "runtime": "JRE", "spec": "org.apache.atlas.myservice.MyAnnotation"}`
++   where `stdType` is a base or standardized type name (see below for standardized types), `runtime` names the runtime,
++   and `spec` is a runtime-specific string which helps the runtime to deal with instances of this type. In case of a Java
++   runtime the `spec` is the name of the implementing class which may be a subclass of the `stdType`.
++- `analysisRun`: A string that is set to the request ID of the analysis that created it
++(compare the swagger documentation of the REST resource `analyses`, e.g. [here](https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT/swagger/#/analyses)).
++*Internal Note*: will be replaced with RID
++- `summary`: A human-readable string that presents a short summary of the annotation.
++Might be used in generic UIs for displaying unknown annotations.
++*Internal note*: deprecated
++- `jsonProperties`: A string attribute where you can store arbitrary JSON as a string.
++Can be used to 'extend' standard annotations.
++
++...and a single reference attribute:
++
++- `profiledObject`: The object that is annotated by this annotation. In the example above,
++this would point to the Column object.
++
++
++A `ClassificationAnnotation` assigns any number (including 0) of meta data objects to an object.
++It has the same non-reference attributes as `ProfilingAnnotation` plus the following reference attributes:
++
++- `classifiedObject`: The object that is annotated by this annotation.
++- `classifyingObjects`: List of references to meta data objects classifying the classifiedObject.
++
++A `RelationshipAnnotation`  expresses a relationship between meta data objects.
++It has the same non-reference attributes as `ProfilingAnnotation` plus a single reference attribute:
++
++- `relatedObjects`: List of references to related meta data objects.
++
++
++Note that annotations are implemented as proper Atlas object types and not traits (labels) for these reasons:
++
++- Annotations of the same type but from different discovery services should be able to co-exist, for instance,
++to be able to compare results of different services downstream.
++This is only partly possible with traits.
++- Relationships between objects cannot easily be modeled with traits.
++
++A discovery service can deliver its results in a base, standardized, or *custom annotation type*. Depending on the type of the
++underlying relationship a custom annotation type might be a subtype of `ProfilingAnnotation` (asymmetric, single reference attribute),
++`ClassificationAnnotation` (asymmetric, any number of reference attributes), or `RelationshipAnnotation` (symmetric, any number
++of reference attributes). A custom annotation type can have additional non-reference attributes that are stored in its `jsonProperties`.
++
++When implemented in Java, the class defining a custom annotation has private fields and corresponding getter/setter methods
++representing the additional information.
++
++
++## Example
++
++
++For instance, the definition of a new annotation type `org.apache.atlas.oli.MyAnnotation` could look like this:
++
++	public class MyAnnotation extends ClassificationAnnotation {
++	   String myNewAttribute;
++
++	   public String getMyNewAttribute() {
++	      return myNewAttribute;
++	   }
++
++	   public void setMyNewAttribute(String myNewAttribute) {
++	      this.myNewAttribute = myNewAttribute;
++	   }
++	}
++
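++To make the stored format concrete, here is an illustrative sketch (not taken from the ODF sources; all IDs and values are made up) of what an instance of this annotation type could look like in JSON, combining the common attributes described above with the reference attributes of `ClassificationAnnotation`:
++
++	{
++	   "annotationType": "{\"stdType\": \"ClassificationAnnotation\", \"runtime\": \"JRE\", \"spec\": \"org.apache.atlas.oli.MyAnnotation\"}",
++	   "analysisRun": "some-request-id",
++	   "summary": "Example annotation",
++	   "jsonProperties": "{\"myNewAttribute\": \"some value\"}",
++	   "classifiedObject": { "id": "1234-abcd", "repositoryId": "atlas:repos1" },
++	   "classifyingObjects": [ { "id": "5678-efgh", "repositoryId": "atlas:repos1" } ]
++	}
++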
++Annotations can be mapped into standardized meta data objects by a *propagator* which implements the `AnnotationPropagator` interface.
+diff --git a/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md b/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md
+new file mode 100755
+index 0000000..fb972f6
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md
+@@ -0,0 +1,143 @@
++# Tutorial: Build and run your first Discovery Service
++This tutorial shows how you can create your first discovery service in Java that analyzes a data set and creates a single annotation of a new type.
++This tutorial requires that you have [Maven](http://maven.apache.org/) installed.
++
++
++## Create a discovery service
++Follow these steps to create and package a Java implementation of the simplest discovery service.
++
++#### Step 1: Create ODF discovery service maven project
++Create a new Java Maven project from the ODF provided archetype ``odf-archetype-discoveryservice`` (group ID ``org.apache.atlas.odf``).
++Choose the following values for the respective parameters:
++
++| Parameter | Value                    |
++|-----------|--------------------------|
++|groupId    | odftutorials             |
++|artifactId | discoveryservicetutorial |
++|version    | 0.1                      |
++
++
++From the command line, your command may look like this:
++
++	mvn archetype:generate -DarchetypeGroupId=org.apache.atlas.odf -DarchetypeArtifactId=odf-archetype-discoveryservice -DarchetypeVersion=0.1.0-SNAPSHOT -DgroupId=odftutorials -DartifactId=discoveryservicetutorial -Dversion=0.1
++
++This will create a new Maven project with a pom that has dependencies on ODF.
++It will also create two Java classes ``MyDiscoveryService`` and ``MyAnnotation``
++that you may want to use as a basis for the following steps.
++
++If you use Eclipse to create your project, be sure to enable the checkbox "Include snapshot archetypes" in the
++archetype selection page of the New Maven Project wizard.
++
++If you are not interested in the actual code at this point, you may skip Steps 2 through 4 and go directly
++to step 5.
++
++#### Step 2 (optional): Check the discovery service implementation class
++Create a new Java class named ``odftutorials.MyDiscoveryService`` that inherits from `org.apache.atlas.odf.core.discoveryservice.SyncDiscoveryServiceBase`.
++As the base class name indicates, our service will be synchronous, i.e., it will have a simple method ``runAnalysis()`` that returns
++the analysis result. For the implementation of long-running, asynchronous services, see TODO.
++The archetype creation has already filled in some code here that we will use. Your class
++should look something like this:
++
++	public class MyDiscoveryService extends SyncDiscoveryServiceBase {
++
++		@Override
++		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++			// 1. create an annotation that annotates the data set object passed in the request
++			MyAnnotation annotation = new MyAnnotation();
++			annotation.setAnnotatedObject(request.getDataSetContainer().getDataSet().getReference());
++			// set a new property called "myProperty" to some string
++			annotation.setMyProperty("My property was created on " + new Date());
++
++			// 2. create a response with our annotation created above
++			return createSyncResponse( //
++						ResponseCode.OK, // Everything works OK
++						"Everything worked", // human-readable message
++						Collections.singletonList(annotation) // new annotations
++			);
++		}
++	}
++
++What does the code do?
++The code basically consists of two parts:
++
++1. Create a new ``MyAnnotation`` object and annotate the data set that is passed into
++the discovery service with it.
++2. Create the discovery service response with the new annotation and return it.
++
++
++#### Step 3 (optional): Check the new annotation class
++The project also contains a new Java class called ``odftutorials.MyAnnotation``
++which extends the class ``org.apache.atlas.odf.core.metadata.ProfilingAnnotation``.
++It is a new annotation type that contains a property called ``myProperty`` of type ``String``.
++In the code you can see that there is a Java-Bean style getter and a setter method, i.e., ``getMyProperty()`` and
++``setMyProperty(String value)``.
++
++	public class MyAnnotation extends ProfilingAnnotation {
++
++		private String myProperty;
++
++		public String getMyProperty() {
++			return myProperty;
++		}
++
++		public void setMyProperty(String myValue) {
++			this.myProperty = myValue;
++		}
++	}
++
++When we return these annotations, ODF will take care that these annotations are stored
++appropriately in the metadata store.
++
++
++#### Step 4 (optional): Check the discovery service descriptor
++Lastly, the project contains a file called ``META-INF/odf/odf-services.json``
++in the ``main/resources`` folder. This file, which must always have this name,
++contains a JSON list of the descriptions of all services defined in our project.
++The descriptions contain an ID, a name, and a short human-readable description, together
++with the Java class name implementing the service. Here is what it looks like:
++
++	[
++	  {
++		"id": "odftutorials.discoveryservicetutorial.MyDiscoveryService",
++		"name": "My service",
++		"description": "My service creates my annotation for a data set",
++		"type": "Java",
++		"endpoint": "odftutorials.MyDiscoveryService"
++	  }
++	]
++
++Note that most of this information can be changed but ``type`` (this is a Java implementation)
++and ``endpoint`` (the Java class is called ``odftutorials.MyDiscoveryService``)
++should remain as they are.
++
++#### Step 5: Build the service JAR
++The service jar is a standard jar file so you can build it with a standard Maven command like
++
++	mvn clean install
++
++You can find the output jar as per the Maven convention in
++``target/discoveryservicetutorial-0.1.jar``
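++
++To sanity-check the packaging, you can list the jar contents and verify that the service descriptor is included (the `grep` filter is just a convenience):
++
++	jar tf target/discoveryservicetutorial-0.1.jar | grep odf-services.json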
++
++
++## Deploy the discovery service
++Once you've built your service JAR as described in the previous section, there are two ways
++to deploy it.
++
++### Classpath deployment
++The simplest way to make an ODF instance pick up your new service is to add the service JAR (and
++any dependent JARs) to the ODF classpath. A simple way to do this is to package the JARs into the
++ODF war file. Once you (re-)start ODF, your new service should be available.
++
++## Run the discovery service
++
++Perform these steps to run your new service and inspect the results.
++
++1. Go to the Analysis tab in the ODF console
++2. Select the Data Sets tab and click on Start Analysis next to any data set
++3. Select "My Service" as the discovery service and click Submit.
++4. Select the Requests tab and click Refresh
++5. You should see a new entry showing the data set and the "My Service" discovery service.
++6. Click on Annotations. A new page will open showing the Atlas UI with the new
++annotation that was created.
++7. Click on the annotation and check the value of the "myProperty" property. It should contain
++a value like ``My property was created on Mon Feb 01 18:31:51 CET 2016``.
+diff --git a/odf/odf-doc/src/site/markdown/discovery-services.md b/odf/odf-doc/src/site/markdown/discovery-services.md
+new file mode 100755
+index 0000000..e7a0efa
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/discovery-services.md
+@@ -0,0 +1 @@
++# Discovery services
+diff --git a/odf/odf-doc/src/site/markdown/examples.md b/odf/odf-doc/src/site/markdown/examples.md
+new file mode 100755
+index 0000000..df635b4
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/examples.md
+@@ -0,0 +1 @@
++# Examples
+diff --git a/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md b/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md
+new file mode 100755
+index 0000000..44d55f7
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md
+@@ -0,0 +1,3 @@
++# Run your first ODF analysis
++
++See the [First steps](first-steps.html) section for details on how to run the analysis from the ODF console UI.
+diff --git a/odf/odf-doc/src/site/markdown/first-steps.md b/odf/odf-doc/src/site/markdown/first-steps.md
+new file mode 100755
+index 0000000..667696c
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/first-steps.md
+@@ -0,0 +1,65 @@
++# First Steps
++
++This section assumes that you have ODF installed, either via the
++[test environment](test-env.html) or [manually](install.html).
++
++
++### ODF Console UI
++
++To open the ODF console point your browser to the ODF web application.
++In the [test environment](test-env.html), this is typically
++[https://localhost:58081/odf-web-0.1.0-SNAPSHOT](https://localhost:58081/odf-web-0.1.0-SNAPSHOT).
++
++*Note*: The links to the ODF Console in the instructions below only work if you view this documentation
++from the ODF web application.
++
++The default user of the ODF Console is odf / admin4odf.
++
++
++#### Check System health
++Go to the [System monitor](/odf-web-0.1.0-SNAPSHOT/#monitor) tab and 
++click "Check health". After a while you should see a message
++that the health check was successful. If this check fails it might be the case that the dependent services
++are not fully up and running yet (typically happens after start of the test environment), so wait a short while
++and try again.
++
++#### Discovery Services
++Take a look at all available discovery services on the tab [Discovery Services](/odf-web-0.1.0-SNAPSHOT/#discoveryServices).
++
++#### Configure Atlas repository and create sample metadata
++
++To change the URL of your Atlas installation and to create some sample data, go
++to the [Configuration](/odf-web-0.1.0-SNAPSHOT/#configuration) tab.
++In general, it is a good idea to change the default URL to Atlas from "localhost" to a hostname that is accessible
++from your network. If you don't do this, you might experience some strange effects when viewing
++Atlas annotations from the web app.
++If you changed the URL, click Save.
++
++Create a set of simple sample data by clicking on Create Atlas Sample Data.
++
++To explore the sample data go to the [Data Sets](/odf-web-0.1.0-SNAPSHOT/#data) tab.
++
++#### Run analysis
++
++The easiest way to start an analysis is from the [Data Sets](/odf-web-0.1.0-SNAPSHOT/#data) tab.
++In the "Data Files" section look for the sample table "BankClientsShort".
++To view the details of the table click on it anywhere in the row. The Details dialog
++shows you information about this data set. Click Close to close the dialog.
++
++To start an analysis on the "BankClientsShort" table click "Start Analysis" on the right.
++In the "New Analysis Request" dialog click on "&lt;Select a Service&gt;" to add a service to
++the sequence of discovery services to run on the data set. Then click "Submit" to start the analysis.
++
++To check the status of your request go to the 
++[Analysis](/odf-web-0.1.0-SNAPSHOT/#analysis) tab and click Refresh.
++If all went well the status is "Finished".
++Click on "View Results" to view all annotations created for this analysis request. 
++
++
++### REST API
++See the [REST API documentation](/odf-web-0.1.0-SNAPSHOT/swagger) for more details on how to
++perform the actions explained above with the REST API.
++In particular, have a look at the ``analysis`` REST resource for APIs to start and
++monitor analysis requests.
++
++
+diff --git a/odf/odf-doc/src/site/markdown/index.md b/odf/odf-doc/src/site/markdown/index.md
+new file mode 100755
+index 0000000..e070af2
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/index.md
+@@ -0,0 +1,12 @@
++# Overview
++
++The "Open Discovery Framework" is an open metadata-based framework that strives to be a common home for different analytics technologies that discover characteristics of data sets and relationships between them (think "AppStore for discovery algorithms"). Using ODF, applications can leverage new discovery algorithms and their results with minimal integration effort.
++
++Automated characterization of information and automated discovery of relationships are key to several Analytics Platform initiatives, e.g., enabling and improving self service for knowledge workers.
++The Open Discovery Framework provides infrastructure based on open source technology to easily execute, manage, and integrate diverse metadata discovery algorithms provided by internal
++or external (open source) contributors in a single point of access. These discovery algorithms store their analysis results in a common open metadata
++repository that promotes reuse and sharing of these results.
++A simple plug-in mechanism to integrate discovery services enables users of ODF to easily combine and orchestrate algorithms built on different technologies,
++thereby gaining deeper insights into their data.
++
++
+diff --git a/odf/odf-doc/src/site/markdown/install.md b/odf/odf-doc/src/site/markdown/install.md
+new file mode 100755
+index 0000000..af7faec
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/install.md
+@@ -0,0 +1,137 @@
++# Install ODF manually
++
++This section describes how to manually install ODF and its prerequisites.
++
++## Install ODF locally
++
++ODF is installed on an application server like Jetty. Its prereqs (Kafka and Atlas)
++can run on separate machines; they simply must be reachable over the network.
++
++ODF's configuration is stored in Zookeeper which is a prereq for Kafka.
++ 
++
++### Prerequisites 
++
++ODF has two prerequisites:
++
++1. Apache Atlas (only tested with 0.6, but there is no hard dependency on that version)
++2. Apache Kafka 0.8.2.1 which, in turn, requires Zookeeper 3.4
++
++#### Apache Atlas
++
++[Apache Atlas](http://atlas.incubator.apache.org/) is an open 
++metadata infrastructure that is currently in incubator status.
++There is currently no binary download available; you have to [build it yourself](http://atlas.incubator.apache.org/InstallationSteps.html).
++Alternatively, you can download a version that we built [here](https://ibm.box.com/shared/static/of1tdea7465iaen8ywt7l1h761j0fplt.zip).
++
++After you have built the distribution, simply unpack the tar ball and run
++``bin/atlas_start.py``. Atlas is started on port 21443 by default, so point
++your browser to [https://localhost:21443](https://localhost:21443) to look at the 
++Atlas UI.
++Note that starting Atlas can take up to a minute.
++
++To stop, run ``bin/atlas_stop.py``.
++
++See the Atlas section in the [Troubleshooting guide](troubleshooting.html)
++for common issues and how to work around them.
++
++#### Apache Kafka
++
++[Apache Kafka](http://kafka.apache.org/) is an open source project that implements 
++a messaging infrastructure. ODF uses Kafka for notifications and queueing up requests to
++discovery services.
++
++To install Kafka, download version 0.8.2.1 with Scala 2.10 (which is the version we used in
++our tests) from the Kafka website, see [here](https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.10-0.8.2.1.tgz).
++
++After unpacking the tar ball these steps should get you going:
++
++1. CD to the distribution directory.
++2. Start Zookeeper first by running ``bin/zookeeper-server-start.sh config/zookeeper.properties``.
++3. Start Kafka: ``bin/kafka-server-start.sh config/server.properties``
++
++By default, Zookeeper is running on port 2181 and Kafka runs on port 9092. You can change the Zookeeper
++port by changing the properties ``clientPort`` in ``config/zookeeper.properties`` and 
++``zookeeper.connect`` in ``config/server.properties``. Change the Kafka port by changing
++``port`` in ``config/server.properties``.
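++
++For example, moving Zookeeper to port 2182 would amount to the following entries (the values are illustrative):
++
++	# config/zookeeper.properties
++	clientPort=2182
++
++	# config/server.properties
++	zookeeper.connect=localhost:2182
++	port=9092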
++
++For Windows, run the respective .bat commands in the ``bin\windows`` directory.
++
++
++### Deploy ODF 
++
++The only ODF artifact you need for deployment is the war file built by the odf-web maven project, which can typically
++be found here:
++
++	odf-web/target/odf-web-0.1.0-SNAPSHOT.war
++	
++To tell ODF which Zookeeper / Kafka to use you will need to set the 
++Java system property ``odf.zookeeper.connect`` to point
++to the Zookeeper host and port. The value is typically the same string as the ``zookeeper.connect`` property 
++in the Kafka installation ``config/server.properties`` file:
++
++	-Dodf.zookeeper.connect=zkserver.example.org:2181
++
++Note that if this property is not set, the default is ``localhost:52181``.
++ 
++
++#### Application Server
++
++ODF should run on any application server. As of now we have done most of our testing on Jetty.
++
++##### Jetty
++
++[Jetty](https://eclipse.org/jetty/) is an open source web and application server.
++We have used version 9.2.x for our testing (the most current one that supports Java 7).
++Download it from the web site [https://eclipse.org/jetty/](https://eclipse.org/jetty/).
++
++Here are some quick start instructions for creating a new Jetty base. Compare 
++the respective Jetty documentation section [here](http://www.eclipse.org/jetty/documentation/9.2.10.v20150310/quickstart-running-jetty.html#creating-jetty-base).
++
++First, in order to enable basic authentication, the following configuration needs to be added to the `etc/jetty.xml` file, right before the closing `</Configure>` tag at the end of the file:
++
++```
++<Call name="addBean">
++	<Arg>
++		<New class="org.eclipse.jetty.security.HashLoginService">
++			<Set name="name">ODF Realm</Set>
++			<Set name="config"><SystemProperty name="jetty.home" default="."/>/etc/realm.properties</Set>
++		</New>
++	</Arg>
++</Call>
++```
++
++Second, an `etc/realm.properties` file needs to be added that contains the credentials of the ODF users in the following [format](http://www.eclipse.org/jetty/documentation/9.2.10.v20150310/configuring-security-authentication.html#security-realms):
++
++```
++<username>: <password>[,<rolename> ...]
++```
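++
++For example, an entry for the default ODF user mentioned in the [First steps](first-steps.html) section could look like this (the role name `user` is an assumption; use whatever roles your `web.xml` security constraints reference):
++
++```
++odf: admin4odf,user
++```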
++
++Then, you will have to create and initialize a new directory where you deploy your web apps and
++copy the ODF war file there. These commands should do the trick:
++
++	mkdir myjettybase
++	cd myjettybase
++	java -jar $JETTY_HOME/start.jar --add-to-startd=https,ssl,deploy
++	cp $ODFDIR/odf-web-0.1.0-SNAPSHOT.war webapps
++	java -Dodf.zookeeper.connect=zkserver.example.org:2181 -jar $JETTY_HOME/start.jar
++	
++The first java command initializes the Jetty base directory by creating a directory ``start.d`` which
++contains some config files (e.g. http.ini contains the port the server runs on) and the 
++empty ``webapps`` directory.
++The copy command copies the ODF war file to the webapps folder.
++The last command starts Jetty (on default port 8443). You can stop it by hitting Ctrl-C.
++
++You should see a message like this one indicating that the app was found and started.
++
++	2016-02-26 08:28:24.033:INFO:oejsh.ContextHandler:Scanner-0: Started o.e.j.w.WebAppContext@-545d793e{/odf-web-0.1.0-SNAPSHOT,file:/C:/temp/jetty-0.0.0.0-8443-odf-web-0.1.0-SNAPSHOT.war-_odf-web-0.1.0-SNAPSHOT-any-8485458047819836926.dir/webapp/,AVAILABLE}{myjettybase\webapps\odf-web-0.1.0-SNAPSHOT.war}
++
++Point your browser to [https://localhost:8443/odf-web-0.1.0-SNAPSHOT](https://localhost:8443/odf-web-0.1.0-SNAPSHOT) to see the ODF console.
++
++
++
++##### Websphere Liberty Profile
++
++Stay tuned
++
++
+diff --git a/odf/odf-doc/src/site/markdown/jenkins-build.md b/odf/odf-doc/src/site/markdown/jenkins-build.md
+new file mode 100755
+index 0000000..3fe83bb
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/jenkins-build.md
+@@ -0,0 +1,81 @@
++# ODF Jenkins build
++
++## General
++The Jenkins build is set up at 
++[https://shared-discovery-platform-jenkins.swg-devops.com:8443](https://shared-discovery-platform-jenkins.swg-devops.com:8443).
++
++### Available jobs
++The following jobs are available:
++
++1. **Open-Discovery-Framework**: The main build on the master branch with the test environment.
++Built on Linux.
++2. **Open-Discovery-Framework-Parameters**: Job you can trigger manually for private branches
++and platforms. Using the `nodelabel` parameter with value `odfbuild` triggers the build on Linux.
++3. **Open-Discovery-Framework-Testenv**: Manages and/or installs the test env. This job is currently scheduled
++to install the current testenv on the [machine associated with label `odftestenv`](http://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT) every night at 10PM EST.
++
++The parameter `nodelabel` defines the nodes on which the test env is / should be installed:
++    
++- `odftestenv` for testing your private builds on [https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT](https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT).
++- `odfdemo` for the stable demo on [https://odfdemo.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT](https://odfdemo.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT).
++
++Possible actions selectable through the `action` parameter are:
++    
++- `start`: (re)start the test env
++- `stop`: stop the test env
++- `cleanconfig`: (re)starts with clean configuration and Kafka topics
++- `cleanmetadata`: (re)starts with clean metadata 
++- `cleanall`: (re)starts with cleanconfig plus cleanmetadata
++- `install`: Installs the build as specified in the `jenkinsjob` and `buildnumber` parameters.
++ 
++4. **Open-Discovery-Framework-BuildStarter**: Job polling for changes in the master branch and triggering
++the automated build. Starts the Open-Discovery-Framework Linux build. *You should typically not have to trigger this job manually!*
++
++You can find these jobs in Jenkins in the [1-ODF](https://shared-discovery-platform-jenkins.swg-devops.com:8443/view/1-ODF/) tab. 
++
++### Node labels
++This Jenkins system currently contains two kinds of slaves which are distinguished by a
++so-called [node label](https://www.safaribooksonline.com/library/view/jenkins-the-definitive/9781449311155/ch11s04.html).
++
++We currently have these node labels:
++
++1. `odfbuild`: Linux build 
++2. `odftestenv`: Machine sdp1.rtp.raleigh.ibm.com where test envs can be deployed regularly for internal testing.
++
++
++### Some Important Settings
++
++- Use the profile `jenkinsbuild`. This is currently only used in the Bluemix Services and requires that the Bluemix password is not read from the `cf.password` system property but rather from the env var `CFPASSWORD`. This is only done so that the password doesn't appear in the log.
++- The firewall is smashed with a script called `smashlittletonfirewall.sh` (see below). You have to set the env var
++`INTRANETCREDENTIALS` from Jenkins as a combined credential variable (of the form user:password). The reason
++why this is a script and not put into the command line directly is so that the user / password don't appear in the log.
++
++
++### Build Slave Machines
++The build slave machines are:
++
++1. BuildNode: `sdp1.rtp.raleigh.ibm.com`
++2. BuildNode2: `sdpbuild2.rtp.raleigh.ibm.com`
++3. ODFTestEnv: `sdpdemo.rtp.raleigh.ibm.com`
++4. BuildNodeWin1: `sdpwin1.rtp.raleigh.ibm.com`
++
++Access user: ibmadmin / adm4sdp
++
++These VMs can be managed through [vLaunch](https://vlaunch.rtp.raleigh.ibm.com/).
++
++
++### Scripts / settings required on the build slave
++
++#### Windows
++On the windows slaves, install Git from IBM iRAM, e.g., [here](https://w3-03.ibm.com/tools/cm/iram/oslc/assets/503004E8-5971-230E-3D16-6F3FBDBE2E2C/2.5.1)
++and make sure that the *bin* directory of the installation (typically something like `C:\Program Files (x86)\Git\bin`) is in the path.
++This ensures that `sh.exe` is in the path and picked up by the Jenkins jobs.
++
++#### `smashlittletonfirewall.sh`
++
++Used to smash the Littleton firewall. Put this somewhere in the path, e.g., `~/bin`. The reason why this exists
++at all is so that the intranet credentials don't appear in the build log. The file consists of this one line:
++
++	curl -i -L  --user $INTRANETCREDENTIALS --insecure -X GET http://ips-rtc.swg.usma.ibm.com/jazz/web/projects
++
++
+diff --git a/odf/odf-doc/src/site/markdown/odf-metadata-api.md b/odf/odf-doc/src/site/markdown/odf-metadata-api.md
+new file mode 100755
+index 0000000..59f9419
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/odf-metadata-api.md
+@@ -0,0 +1,49 @@
++# ODF Metadata API
++
++ODF provides a very simple API for searching, retrieving, and (to a limited extent) creating
++new metadata objects.
++This API abstracts away specifics of the underlying metadata store.
++See the REST resource `metadata`, e.g., [here](https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT/swagger/#/metadata) or look at the Java interface `org.apache.atlas.odf.core.metadata.MetadataStore`.
++
++In this API we distinguish between `MetaDataObject`s and `MetadataObjectReference`s.
++While the former represents an object as such, the latter is just a reference to an object.
++You may think of the `MetaDataObjectReference` as a generalized XMeta RID.
++
++Simply put, metadata objects are represented as JSON where object attributes
++are represented as JSON attributes with the same name.
++Simple types map to JSON simple types. References
++to another object are represented as JSON objects of type `MetadataObjectReference` which
++has three attributes:
++
++1. `id`: the object ID
++2. `repositoryId`: the ID of the repository where the object resides
++3. `url` (optional): A URL pointing to the object. For Atlas, this is a link to the object in the
++Atlas dashboard.
++
++The API is read-only; the only objects that can be created are annotations (see section [Data model and extensibility](data-model.html)).
++
++Here is an example: suppose there is a table object which has a name and a list of columns. The JSON of this table would look something like this:
++
++	{
++	   "name": "CUSTOMERS",
++	   "columns": [
++	                 {
++	                    "id": "1234-abcd",
++	                    "repositoryId": "atlas:repos1"
++	                 },
++	                 {
++	                    "id": "5678-efgh",
++	                    "repositoryId": "atlas:repos1"
++	                 }
++	              ],
++	   "reference": {
++	                  "id": "9abc-ijkl",
++	                  "repositoryId": "atlas:repos1"
++	                },
++	   "javaClass": "org.apache.atlas.odf.core.metadata.models.Table"
++	}
++
++The `reference` value represents the reference to the object itself, whereas
++`javaClass` denotes the type of the object (a table in this case).
++The `name` attribute contains the table name, while the `columns` value is a list
++of references to two column objects. These references can be retrieved separately
++to look at the details.
+diff --git a/odf/odf-doc/src/site/markdown/operations.md b/odf/odf-doc/src/site/markdown/operations.md
+new file mode 100755
+index 0000000..114493d
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/operations.md
+@@ -0,0 +1 @@
++# Operations
+diff --git a/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md b/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md
+new file mode 100755
+index 0000000..4961453
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md
+@@ -0,0 +1,192 @@
++# Tutorial: Creating Spark discovery services
++
++This tutorial shows how to turn an existing [Apache Spark][1] application into an ODF discovery service of type *Spark*. The tutorial is based on the Spark *summary statistics* example application provided with ODF in project `odf-spark-example-application`. It uses the `describe()` method of the Spark [Dataset][2] class to calculate basic summary statistics on a Spark data frame.
++
++## Introduction
++
++ODF supports Spark applications implemented in Java or Scala. In order to be used as ODF discovery services, a Spark application must implement one of the following two interfaces:
++
++* **DataFrame** - intended for Spark applications that process relational tables by using Spark data frames internally.
++* **Generic** - intended for applications that need the full flexibility of ODF.
++
++Both interfaces require a specific method (or multiple methods) to be implemented by the Spark application, which is called by ODF to run the discovery service. This method takes the current Spark context and the data set to be processed as input parameters and returns the annotations to be created. The two interface types are described in detail in separate sections below.
++
++Spark discovery services must be packaged into a single application jar file that contains all required dependencies. Spark libraries, drivers for data access, and the required ODF jar files are implicitly provided by ODF and do not need to be packaged into the application jar file. The jar file may be renamed to *zip* by replacing its extension (not by zipping the jar file) in order to avoid possible security issues when making the file available through tools like [box](https://box.com).
++
++### Configure an ODF Spark cluster
++
++ODF supports access to a local Spark cluster which can be configured in the `sparkConfig` section of the ODF settings using the ODF REST API or the ODF web application. The parameter `clusterMasterUrl` must point to the master URL of your Spark cluster, e.g. `spark://dyn-9-152-202-64.boeblingen.de.ibm.com:7077`. An optional set of [Spark configuration options](http://spark.apache.org/docs/latest/configuration.html) can be set in the `configs` parameter by providing appropriate name value pairs. The ODF test environment comes with a ready-to-use local Spark cluster running on your local system. It can be monitored at the URL `http://localhost:8080/`.
++
++### Registering a Spark service
++
++A Spark discovery service can be registered using the *Services* tab of the admin Web application or the `/services` endpoint of the [ODF REST API](../swagger/ext-services.html). The following parameters need to be specified to register a service; you may use the following example values to register your own instance of the *summary statistics* discovery service:
++
++* Name of the discovery service: `Spark Summary Statistics`
++* Description: `Calculates summary statistics for a given table or data file.`
++* Unique service ID: `spark-summary-statistics`
++* URL of application jar file (may be renamed to zip): `file:///tmp/odf-spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar` (Update link to point to correct location of the file)
++* Name of entry point to be called: `org.apache.atlas.odf.core.spark.SummaryStatistics`
++* Service interface type: `DataFrame`
++
++For trying out the *generic* interface, entry point `org.apache.atlas.odf.spark.SparkDiscoveryServiceExample` and service interface type `Generic` may be specified.   
++
++### Testing the Spark service
++
++In order to test the Spark service, you can use the *DataSets* tab of the ODF admin Web application. Click on *START ANALYSIS* next to a relational data set (data file or relational table), then select the newly registered Spark discovery service and click *SUBMIT*. You can browse the resulting annotations by searching for the name of the annotation type in the Atlas metadata repository. The example service creates two types of annotations, *SummaryStatisticsAnnotation* and *SparkTableAnnotation*. *SummaryStatisticsAnnotation* annotates data set columns with the five attributes `count`, `mean`, `stddev`, `min`, and `max`, which represent basic statistics of the data set. *SparkTableAnnotation* annotates the data set with a single attribute `count` that represents the number of rows of the data set.
++
++### Developing Spark discovery services
++
++When developing a new discovery service, you may use project `odf-spark-example-application` as a template. Rather than testing your service interactively using the ODF admin web application, it is recommended to create a new test case in class `SparkDiscoveryServiceTest` of project `odf-core`. Two methods need to be added, one for describing the service, the other for running the actual test.
++
++The method that describes the service basically contains the same parameters that need to be specified when adding a service through the admin webapp. The jar file must be a URL, which may point to a local file:
++
++	public static DiscoveryServiceRegistrationInfo getSparkSummaryStatisticsService() {
++		DiscoveryServiceRegistrationInfo regInfo = new DiscoveryServiceRegistrationInfo();
++		regInfo.setId("spark-summary-statistics-example-service");
++		regInfo.setName("Spark summary statistics service");
++		regInfo.setDescription("Example discovery service calling summary statistics Spark application");
++		regInfo.setIconUrl("spark.png");
++		regInfo.setLink("http://www.spark.apache.org");
++		regInfo.setParallelismCount(2);
++		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
++		endpoint.setJar("file:/tmp/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
++		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
++		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
++		regInfo.setEndpoint(endpoint);
++		return regInfo;
++	}
++
++The method that runs the actual test retrieves the service description from the above method and specifies what type of data set should be used for testing (data file vs. relational table) and what types of annotations are created by the discovery service. The test automatically applies the required configurations, runs the service, and checks whether new annotations of the respective types have been created. In order to speed up processing, the existing test can be temporarily commented out.  
++
++	@Test
++	public void testLocalSparkClusterWithLocalDataFile() throws Exception{
++		runSparkServiceTest(
++			getLocalSparkConfig(),
++			DATASET_TYPE.DataFile,
++			getSparkSummaryStatisticsService(),
++			new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" }
++		);
++	}
++
++For compiling the test case, the `odf-core` project needs to be built:
++
++	cd ~/git/shared-discovery-platform/odf-core
++	mvn clean install -DskipTests
++
++The test is started implicitly when building the `odf-spark` project.
++
++	cd ~/git/shared-discovery-platform/odf-spark
++	mvn clean install
++
++If something goes wrong, debugging information will be printed to stdout during the test. For speeding up the build and test process, option `-Duse.running.atlas` may be added to the two `mvn` commands. This way, a running Atlas instance will be used instead of starting a new instance every time.
++
++
++### Troubleshooting
++
++Before registering a Spark application in ODF as a new discovery service, it is highly recommended to test the application interactively using the `spark-submit` tool and to check whether the application implements the requested interfaces and produces the expected output format. If the execution of a Spark discovery service fails, you can browse the ODF log for additional information.
++
++## DataFrame interface
++
++The ODF *DataFrame* interface for Spark discovery services has a number of advantages that makes it easy to turn an existing Spark application into an ODF discovery service:
++
++* No dependencies on the ODF code, except that a specific method needs to be implemented.
++* No need to care about data access because the data set to be analyzed is provided as Spark data frame.
++* Easy creation of annotations by returning "annotation data frames".   
++
++The simplicity of the DataFrame interface leads to a number of restrictions:
++
++* Only relational data sets can be processed, i.e. data files (OMDataFile) and relational tables (OMTable).
++* Annotations may only consist of a flat list of attributes that represent simple data types, i.e. data structures and references to other data sets are not supported.  
++* Annotations may only be attached to the analyzed relational data set as well as to its columns.
++
++### Method to be implemented
++
++In order to implement the DataFrame interface, the Spark application must implement the following method:
++
++	public static Map<String,Dataset<Row>> processDataFrame(JavaSparkContext sc, DataFrame df, String[] args)
++
++The parameters to be provided to the Spark application are:
++
++* **sc**: The Spark context to be used by the Spark application for performing all Spark operations.
++* **df**: The data set to be analyzed represented by a Spark data frame.
++* **args**: Optional arguments for future use.
++
++### Expected output
++
++The result to be provided by the Spark application must be of type `Map<String,Dataset<Row>>` where `String` represents the type of the annotation to be created and `Dataset<Row>` represents the *annotation data frame* that defines the annotations to be created. If the annotation type does not yet exist, a new annotation type will be dynamically created based on the attributes of the annotation data frame.
++
++The following example describes the format of the annotation data frame. The example uses the BankClientsShort data file provided with ODF. It contains 16 columns with numeric values that represent characteristics of bank clients:
++
++CUST_ID | ACQUIRED | FIRST_PURCHASE_VALUE | CUST_VALUE_SCORE | DURATION_OF_ACQUIRED | CENSOR | ACQ_EXPENSE | ACQ_EXPENSE_SQ | IN_B2B_INDUSTRY | ANNUAL_REVENUE_MIL | TOTAL_EMPLOYEES | RETAIN_EXPENSE | RETAIN_EXPENSE_SQ | CROSSBUY | PURCHASE_FREQ | PURCHASE_FREQ_SQ
++---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
++481 | 0 | 0.0 | 0.0000 | 0 | 0 | 382.32 | 146168.58 | 0 | 56.51 | 264 | 0.00 | 0.0 | 0 | 0 | 0
++482 | 1 | 249.51 | 59.248 | 730 | 1 | 586.61 | 344111.29 | 1 | 35.66 | 355 | 1508.16 | 2274546.59 | 2 | 3 | 9
++483 | 0 | 0.0 | 0.0000 | 0 | 0 | 444.61 | 197678.05 | 1 | 40.42 | 452 | 0.00 | 0.0 | 0 | 0 | 0
++484 | 1 | 351.41 | 77.629 | 730 | 1 | 523.10 | 273633.61 | 1 | 56.36 | 320 | 2526.72 | 6384313.96 | 3 | 12 | 144
++485 | 1 | 460.04 | 76.718 | 730 | 1 | 357.78 | 128006.53 | 1 | 23.53 | 1027 | 2712.48 | 7357547.75 | 2 | 13 | 169
++486 | 1 | 648.6 | 0.0000 | 701 | 0 | 719.61 | 517838.55 | 0 | 59.97 | 1731 | 1460.64 | 2133469.21 | 5 | 11 | 121
++487 | 1 | 352.84 | 63.370 | 730 | 1 | 593.44 | 352171.03 | 1 | 45.08 | 379 | 1324.62 | 1754618.14 | 4 | 8 | 64
++488 | 1 | 193.18 | 0.0000 | 289 | 0 | 840.30 | 706104.09 | 0 | 35.95 | 337 | 1683.83 | 2835283.47 | 6 | 12 | 144
++489 | 1 | 385.14 | 0.0000 | 315 | 0 | 753.13 | 567204.80 | 0 | 58.85 | 745 | 1214.99 | 1476200.7 | 1 | 12 | 144
++
++When applying the *Spark Summary Statistics* service to the table, two annotation data frames will be returned by the service, one for the *SparkSummaryStatistics* and one for the *SparkTableAnnotation* annotation type. The data frame returned for the *SparkSummaryStatistics* annotation type consists of one column for each attribute of the annotation. In the example, the attributes are `count`, `mean`, `stddev`, `min`, and `max`, standing for the number of values, the mean, the standard deviation, and the minimum and maximum value of each column. Each row represents one annotation to be created. The first column `ODF_ANNOTATED_COLUMN` stands for the column of the input data frame to which the annotation should be assigned.
++
++ODF_ANNOTATED_COLUMN    |count   |                mean |              stddev |       min |       max
++------------------------|--------|---------------------|---------------------|-----------|----------
++              CLIENT_ID |  499.0 |   1764.374749498998 |  108.14436025195488 |    1578.0 |    1951.0
++                    AGE |  499.0 |   54.65130260521042 |  19.924220223453258 |      17.0 |      91.0
++          NBR_YEARS_CLI |  499.0 |  16.847695390781563 |  10.279080097460023 |       0.0 |      48.0
++        AVERAGE_BALANCE |  499.0 |   17267.25809619238 |   30099.68272689043 |  -77716.0 |  294296.0
++             ACCOUNT_ID |  499.0 |   126814.4749498998 |  43373.557241804665 |  101578.0 |  201950.0
++
++If there is no (first) column named `ODF_ANNOTATED_COLUMN`, the annotations will be assigned to the data set rather than to its columns. The following example annotation data frame of type *SparkTableAnnotation* assigns a single attribute `count` to the data set:
++
++| count |
++|-------|
++| 499   |
++
++### Example implementation
++
++The implementation of the *summary statistics* discovery service may be used as a reference implementation for the DataFrame interface. It is available in class `SummaryStatistics` of project `odf-spark-example-application`.
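++
++For orientation, here is a minimal sketch of what such an implementation could look like. It is not the actual `SummaryStatistics` code: it assumes Spark 2.x types (where a data frame is a `Dataset<Row>`) and uses a made-up annotation type name. Note that `describe()` returns one row per statistic, so a real implementation has to transpose the result into the `ODF_ANNOTATED_COLUMN` layout shown above:
++
++	import java.util.HashMap;
++	import java.util.Map;
++
++	import org.apache.spark.api.java.JavaSparkContext;
++	import org.apache.spark.sql.Dataset;
++	import org.apache.spark.sql.Row;
++
++	public class MinimalSummaryStatistics {
++
++		public static Map<String, Dataset<Row>> processDataFrame(JavaSparkContext sc, Dataset<Row> df, String[] args) {
++			Map<String, Dataset<Row>> annotationDataFrames = new HashMap<>();
++			// describe() computes count, mean, stddev, min, and max for the numeric columns;
++			// it returns one row per statistic, so a real service would transpose this into
++			// one row per annotated column with the column name in ODF_ANNOTATED_COLUMN
++			annotationDataFrames.put("MySummaryStatisticsAnnotation", df.describe());
++			return annotationDataFrames;
++		}
++	}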
++
++## Generic interface
++
++The *generic* interface provides the full flexibility of ODF discovery services implemented in Java (or Scala):
++
++* No restrictions regarding the types of data sets to be analyzed.
++* Arbitrary objects may be annotated because references to arbitrary objects may be retrieved from the meta data catalog.
++* Annotations may contain nested structures of data types and references to arbitrary objects.
++
++On the downside, the generic interface may be slightly more difficult to use than the DataFrame interface:
++
++* Discovery service must implement a specific ODF interface.
++* Spark RDDs, data frames etc. must be explicitly constructed (Helper methods are available in class `SparkUtils`).
++* Resulting annotations must be explicitly constructed and linked to the annotated objects.
++
++### Methods to be implemented
++
++The Spark application must implement the `SparkDiscoveryService` interface available in ODF project `odf-core-api`:
++
++	public class SparkDiscoveryServiceExample extends SparkDiscoveryServiceBase implements SparkDiscoveryService
++
++The interface consists of the following two methods that are described in detail in the [Java Docs for ODF services](./apidocs/index.html). The `SparkDiscoveryServiceBase` can be extended for convenience as the `SparkDiscoveryService` interface has many more methods.
++
++#### Actual discovery service logic
++
++This method is called to run the actual discovery service.
++
++	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request)
++
++#### Validation whether data set can be accessed
++
++This method is called internally before running the actual discovery service.
++
++	DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer)
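++
++Putting the two methods together, a skeleton of a generic service could look like the following sketch. The imports are based on the ODF API package layout and may need to be adjusted; the method bodies only indicate where the actual logic goes:
++
++	import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++	import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++	import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++	import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++	// plus imports for SparkDiscoveryService and SparkDiscoveryServiceBase from the ODF API
++
++	public class MySparkDiscoveryService extends SparkDiscoveryServiceBase implements SparkDiscoveryService {
++
++		@Override
++		public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
++			DataSetCheckResult result = new DataSetCheckResult();
++			// verify here that the data set in the container can be accessed
++			return result;
++		}
++
++		@Override
++		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++			// build the Spark data structures for request.getDataSetContainer() (see SparkUtils),
++			// run the analysis, and attach the resulting annotations to the response
++			return response;
++		}
++	}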
++
++### Example implementation
++
++The class `SparkDiscoveryServiceExample` in project `odf-spark-example-application` provides an example implementation of a *generic* discovery service. It provides an alternative implementation of the *summary statistics* discovery service.
++
++  [1]: http://spark.apache.org/
++  [2]: http://spark.apache.org/docs/latest/api/java/index.html
+diff --git a/odf/odf-doc/src/site/markdown/test-env.md b/odf/odf-doc/src/site/markdown/test-env.md
+new file mode 100755
+index 0000000..7fbe6ca
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/test-env.md
+@@ -0,0 +1,71 @@
++# Test environment
++
++The odf-test-env archive contains a simple test environment for ODF.
++It contains all components to run a simple ODF installation, namely
++
++- Apache Kafka
++- Apache Atlas
++- Jetty (to host the ODF web app)
++- Apache Spark
++
++The test environment is available on Linux and Windows.
++
++## Before you start
++
++Make sure that 
++
++- The Python 2.7 executable is in your path
++- The environment variable JAVA_HOME is set and points to a proper JDK (not just a JRE!) 
++
++
++## *Fast path*: Download and install ODF test environment
++
++If you are running on Linux you can download and install the latest ODF test environment by
++downloading the script `download-install-odf-testenv.sh` from
++<a href="https://shared-discovery-platform-jenkins.swg-devops.com:8443/view/1-ODF/job/Open-Discovery-Framework/lastSuccessfulBuild/artifact/odf-test-env/src/main/scripts/download-install-odf-testenv.sh">
++here</a>.
++
++If you call the script with no parameters, it will download, install and start the latest version of the test env.
++The default unpack directory is `~/odf-test-env`.
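++
++For example (assuming the script has been downloaded to the current directory):
++
++	chmod +x download-install-odf-testenv.sh
++	./download-install-odf-testenv.sh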
++
++## Download the test environment manually
++
++You can get the latest version of the test environment from the Jenkins
++<a href="https://shared-discovery-platform-jenkins.swg-devops.com:8443/view/1-ODF/job/Open-Discovery-Framework/lastSuccessfulBuild/artifact/odf-test-env/target/odf-test-env-0.1.0-SNAPSHOT-bin.zip">
++here</a>.
++
++## Running the test environment 
++
++To start the test environment on Linux, run the script ``odftestenv.sh start``. The script will start four background processes (Zookeeper, Kafka, Atlas, Jetty). To stop the test env, use the script ``odftestenv.sh stop``.
++
++To start the test environment on Windows, run the script ``start-odf-testenv.bat``.
++This will open four command windows (Zookeeper, Kafka, Atlas, Jetty) with respective window titles. To stop the test environment close all these windows. Note that the `HADOOP_HOME` environment variable needs to be set on Windows as described in the [build documentation](build.md).
++
++
++Once the servers are up and running you will reach the ODF console at 
++[https://localhost:58081/odf-web-0.1.0-SNAPSHOT](https://localhost:58081/odf-web-0.1.0-SNAPSHOT).
++
++*Note*: The test environment scripts clean the Zookeeper and Kafka data before starting.
++This means in particular that the configuration will be reset every time you restart it!
++
++Have fun!
++
++## Restart / cleanup
++
++On Linux, the `odftestenv.sh` script has these additional options
++
++- `cleanconfig`: Restart the test env with a clean configuration and clean Kafka topics
++- `cleanmetadata`: Restart with empty metadata
++- `cleanall`: Both `cleanconfig` and `cleanmetadata`.
++
++
++## Additional Information
++
++### Deploying a new version of the ODF war
++Once started you can hot-deploy a new version of the ODF war file simply by copying it
++to the ``odfjettybase/webapps`` folder even while the test environment's Jetty instance is running.
++Note that it may take a couple of seconds before the new app is available.
++
++If you have the ODF build set up you may want to use the ``deploy-odf-war.bat/.sh`` script for this.
++You must edit the environment variable ``ODF_GIT_DIR`` in this script first to point to your local build directory.
++
+diff --git a/odf/odf-doc/src/site/markdown/troubleshooting.md b/odf/odf-doc/src/site/markdown/troubleshooting.md
+new file mode 100755
+index 0000000..9c7fe49
+--- /dev/null
++++ b/odf/odf-doc/src/site/markdown/troubleshooting.md
+@@ -0,0 +1,112 @@
++# Troubleshooting
++
++## ODF
++
++### Debugging using eclipse
++
++You can run Jetty inside Eclipse using the “Eclipse Jetty Feature” (Eclipse -> Help -> Install New Software…). 
++Then, create a new debug configuration (Run -> Debug Configurations…). Specify:
++
++WebApp Tab:
++
++- Project: odf-web
++- WebApp Folder: ../../../../../odf-web/src/main/webapp
++- Context Path: /odf-web-0.1.0-SNAPSHOT
++- HTTP / HTTPs Port: 58081
++
++Arguments Tab:
++
++- VM Arguments: -Dodf.zookeeper.connect=localhost:52181
++
++As the Eclipse Jetty plugin supports neither secure connections nor basic authentication, remove the `<security-constraint>`
++and `<login-config>`
++sections from the web.xml.
++The URL of the ODF web app then needs to be prefixed with http:// rather than https://.
++
++Then start Atlas and Kafka via the test env (just comment out the line that starts Jetty, or stop Jetty after it has started).
++Now you can use the debug configuration in Eclipse to start ODF.
++
++See also https://ibm-analytics.slack.com/archives/shared-discovery-pltf/p1467365155000009
++
++
++### Logs and trace
++ODF uses the ``java.util.logging`` APIs, so if your runtime environment supports
++configuring these directly, use the respective mechanism.
++
++For runtimes that don't support this out of the box (like Jetty) you can set the JVM system property
++``odf.logspec`` to a value of the form ``<Level>,<Path>``, which advises ODF to
++write its log with logging level ``<Level>`` to the file under ``<Path>``.
++
++Example:
++
++	-Dodf.logspec=ALL,/tmp/myodflogfile.log
++
++Available log levels are the ones defined by java.util.logging, namely SEVERE, WARNING, INFO, FINE, FINER, FINEST,
++and ALL.
++
++
++## Atlas
++
++### Logs
++
++The logs directory contains a number of logfiles, together with a file called ``atlas.pid`` which
++contains the process ID of the currently running Atlas server.
++In case of issues, check the file ``logs/application.log`` first.
++
++### Restarting Atlas
++
++Run the following commands from the Atlas installation directory to restart Atlas:
++
++	bin/atlas_stop.py
++	bin/atlas_start.py
++
++### Clean all data
++
++To clean the Atlas repository, simply remove the directories ``data`` and ``logs`` before starting.
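++
++For example (a sketch, run from the Atlas installation directory while Atlas is stopped):
++
++	rm -rf data logs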
++
++
++### Issues
++
++#### Service unavailable (Error 503)
++
++Sometimes, calling any Atlas REST API (and the UI) doesn't work and an HTTP error 503 is returned.
++We see this error occasionally and know of no way to fix it other than cleaning all data and restarting Atlas.
++
++
++#### Creating Atlas objects takes a long time
++
++It can take a long time to create an Atlas object, and after about a minute you see a message like this in the log:
++
++	Unable to update metadata after 60000ms
++
++This is the result of the Kafka queues (which are used for notifications) being in an error state.
++To fix this, restart Atlas (no data cleaning required).
++
++## Kafka / Zookeeper
++
++If there is a problem starting Kafka / Zookeeper, check whether there is a port conflict caused by other instances of Kafka / Zookeeper using the default port.
++This might be the case if a more recent version of the IS suite is installed on the system on which you want to run ODF.
++
++Example: If another instance of Zookeeper uses the default port 52181, you need to change the Zookeeper port by replacing 52181 with a free port number in the following files (see the sketch after this list):
++- start-odf-testenv.bat
++- kafka_2.10-0.8.2.1\config\zookeeper.properties
++- kafka_2.10-0.8.2.1\config\server.properties
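++
++On Linux this can be scripted, e.g. with `sed` (a sketch; 52182 stands for any free port, paths written Unix-style):
++
++	sed -i 's/52181/52182/g' start-odf-testenv.bat kafka_2.10-0.8.2.1/config/zookeeper.properties kafka_2.10-0.8.2.1/config/server.properties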
++
++### Reset
++
++To reset your Zookeeper / Kafka installation, you will first have to stop the servers:
++
++	bin/kafka-server-stop
++	bin/zookeeper-server-stop
++	
++Next, remove the Zookeeper data directory and the Kafka logs directory. Note that "logs"
++in Kafka means the actual data in the topics, not the logfiles.
++You can find which directories to clean in the ``dataDir`` property of the ``zookeeper.properties``
++file and the ``log.dirs`` property of ``server.properties``, respectively.
++The defaults are ``/tmp/zookeeper`` and ``/tmp/kafka-logs``.
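++
++With the default settings this amounts to (a sketch, assuming you kept the defaults):
++
++	rm -rf /tmp/zookeeper /tmp/kafka-logs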
++
++Restart the servers with
++
++	bin/zookeeper-server-start config/zookeeper.properties
++	bin/kafka-server-start config/server.properties
++
++
+diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml
+new file mode 100755
+index 0000000..e6ffb46
+--- /dev/null
++++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml
+@@ -0,0 +1,44 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++
++	<groupId>odf.tutorials</groupId>
++	<artifactId>odf-tutorial-discoveryservice</artifactId>
++	<version>1.2.0-SNAPSHOT</version>
++	<packaging>jar</packaging>
++
++	<name>odf-tutorial-discoveryservice</name>
++
++	<properties>
++		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
++	</properties>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++	</dependencies>
++</project>
+diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java
+new file mode 100755
+index 0000000..2899a53
+--- /dev/null
++++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java
+@@ -0,0 +1,33 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package odftutorial.discoveryservicetutorial;
++
++import org.apache.atlas.odf.core.metadata.Annotation;
++
++/*
++ * An example annotation that adds one property to the default annotation class
++ */
++public class ODFTutorialAnnotation extends Annotation {
++
++	private String tutorialProperty;
++
++	public String getTutorialProperty() {
++		return tutorialProperty;
++	}
++
++	public void setTutorialProperty(String tutorialProperty) {
++		this.tutorialProperty = tutorialProperty;
++	}
++
++}
+diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java
+new file mode 100755
+index 0000000..16848ec
+--- /dev/null
++++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java
+@@ -0,0 +1,46 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package odftutorial.discoveryservicetutorial;
++
++import java.util.Collections;
++import java.util.Date;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
++import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++/**
++ * A simple synchronous discovery service that creates one annotation for the data set it analyzes.
++ *
++ */
++public class ODFTutorialDiscoveryService extends SyncDiscoveryServiceBase {
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		// 1. create an annotation that annotates the data set object passed in the request
++		ODFTutorialAnnotation annotation = new ODFTutorialAnnotation();
++		annotation.setAnnotatedObject(request.getDataSetContainer().getDataSet().getReference());
++		// set a new property called "tutorialProperty" to some string
++		annotation.setTutorialProperty("Tutorial annotation was created on " + new Date());
++
++		// 2. create a response with our annotation created above
++		return createSyncResponse( //
++				ResponseCode.OK, // Everything works OK 
++				"Everything worked", // human-readable message
++				Collections.singletonList(annotation) // new annotations
++		);
++	}
++
++}
+diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json
+new file mode 100755
+index 0000000..2709548
+--- /dev/null
++++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json
+@@ -0,0 +1,13 @@
++[
++  {
++	"id": "odftutorial.discoveryservicetutorial.ODFTutorialDiscoveryService",
++	"name": "First tutorial service",
++	"description": "The first tutorial service that is synchronous and creates just a single annotation for a data set.",
++	"deletable": true,
++	"endpoint": {
++	  "runtimeName": "Java",
++	  "className": "odftutorial.discoveryservicetutorial.ODFTutorialDiscoveryService"
++	},
++	"iconUrl": "https://www-03.ibm.com/ibm/history/exhibits/logo/images/920911.jpg"
++  }
++]
+diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java
+new file mode 100755
+index 0000000..1eab53f
+--- /dev/null
++++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java
+@@ -0,0 +1,29 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package odftutorial.discoveryservicetutorial;
++
++import org.junit.Assert;
++import org.junit.Test;
++
++/**
++ * Unit test for discovery service
++ */
++public class ODFTutorialDiscoveryServiceTest {
++	
++	@Test
++	public void test() throws Exception {
++		Assert.assertTrue(true);
++	}
++}
++
+diff --git a/odf/odf-doc/src/site/site.xml b/odf/odf-doc/src/site/site.xml
+new file mode 100755
+index 0000000..c810e66
+--- /dev/null
++++ b/odf/odf-doc/src/site/site.xml
+@@ -0,0 +1,62 @@
++<?xml version="1.0" encoding="ISO-8859-1"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project name="Open Discovery Framework">
++	<skin>
++		<groupId>org.apache.maven.skins</groupId>
++		<artifactId>maven-fluido-skin</artifactId>
++		<version>1.4</version>
++	</skin>
++	<bannerLeft>
++		<name>Open Discovery Framework</name>
++	</bannerLeft>
++	<custom>
++		<fluidoSkin>
++			<topBarEnabled>false</topBarEnabled>
++			<sideBarEnabled>true</sideBarEnabled>
++		</fluidoSkin>
++	</custom>
++	<body>
++		<links>
++			<item name="Apache Atlas" href="http://atlas.incubator.apache.org" />
++			<item name="Apache Kafka" href="http://kafka.apache.org" />
++		</links>
++		<menu name="Getting Started">
++			<item name="Overview" href="index.html" />
++			<item name="First Steps" href="first-steps.html" />
++			<item name="Build" href="build.html" />
++			<item name="Test Environment" href="test-env.html" />
++		</menu>
++		<menu name="Tutorials">
++			<item name="Install ODF and its prerequisites manually" href="install.html"/>
++			<item name="Run your first ODF analysis" href="first-analysis-tutorial.html"/>
++			<item name="Build and run your first Discovery Service" href="discovery-service-tutorial.html"/>
++			<item name="Creating Spark discovery services" href="spark-discovery-service-tutorial.html"/>
++		</menu>
++		<menu name="Reference">
++			<item name="ODF Metadata API" href="odf-metadata-api.html" />
++			<item name="API reference" href="api-reference.html" />
++			<item name="Troubleshooting" href="troubleshooting.html" />
++		</menu>
++		<menu name="Customization">
++			<item name="Discovery Services" href="discovery-services.html" />
++			<item name="Data Model" href="data-model.html" />
++		</menu>
++		<menu name="Internal">
++			<item name="Jenkins build" href="jenkins-build.html" />
++		</menu>
++		<footer>All rights reserved.</footer>
++	</body>
++</project>
+diff --git a/odf/odf-messaging/.gitignore b/odf/odf-messaging/.gitignore
+new file mode 100755
+index 0000000..9d8eebd
+--- /dev/null
++++ b/odf/odf-messaging/.gitignore
+@@ -0,0 +1,6 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++derby.log
+diff --git a/odf/odf-messaging/pom.xml b/odf/odf-messaging/pom.xml
+new file mode 100755
+index 0000000..95f9d44
+--- /dev/null
++++ b/odf/odf-messaging/pom.xml
+@@ -0,0 +1,208 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++	<artifactId>odf-messaging</artifactId>
++	<name>odf-messaging</name>
++
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.kafka</groupId>
++			<artifactId>kafka-clients</artifactId>
++			<version>0.10.0.0</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.kafka</groupId>
++			<artifactId>kafka_2.11</artifactId>
++			<version>0.10.0.0</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.derby</groupId>
++			<artifactId>derby</artifactId>
++			<version>10.12.1.1</version>
++			<scope>test</scope>
++		</dependency>
++	</dependencies>
++
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-jar-plugin</artifactId>
++				<version>2.6</version>
++				<executions>
++					<execution>
++						<goals>
++							<goal>test-jar</goal>
++						</goals>
++						<configuration>
++							<!-- remove implementations properties file for test jar -->
++							<excludes>
++								<exclude>org/apache/atlas/odf/odf-implementation.properties</exclude>
++							</excludes>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++
++	<profiles>
++		<profile>
++			<id>all-unit-tests</id>
++			<activation>
++				<activeByDefault>true</activeByDefault>
++			</activation>
++			<build>
++				<plugins>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-surefire-plugin</artifactId>
++						<version>2.19</version>
++						<configuration>
++							<systemPropertyVariables>
++								<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++								<odf.build.project.name>${project.name}</odf.build.project.name>
++
++								<!-- additional properties for the test services -->
++								<asynctestservice.testparam>sometestvalueforasync</asynctestservice.testparam>
++								<synctestservice.testparam>sometestvalueforsync</synctestservice.testparam>
++							</systemPropertyVariables>
++							<dependenciesToScan>
++								<dependency>org.apache.atlas.odf:odf-core</dependency>
++							</dependenciesToScan>
++							<!--
++							<includes><include>**ShutdownTest**</include></includes>
++							-->
++							<excludes>
++								<exclude>**/integrationtest/**</exclude>
++								<exclude>**/configuration/**</exclude>
++							</excludes>
++						</configuration>
++					</plugin>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-failsafe-plugin</artifactId>
++						<version>2.19</version>
++						<configuration>
++							<systemPropertyVariables>
++								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++								<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
++							</systemPropertyVariables>
++							<dependenciesToScan>
++								<dependency>org.apache.atlas.odf:odf-core</dependency>
++							</dependenciesToScan>
++							<includes>
++								<include>**/integrationtest/**/**.java</include>
++							</includes>
++							<excludes>
++								<exclude>**/integrationtest/**/SparkDiscoveryService*</exclude>
++								<exclude>**/integrationtest/**/AnalysisManagerTest.java</exclude>
++							</excludes>
++						</configuration>
++						<executions>
++							<execution>
++								<id>integration-test</id>
++								<goals>
++									<goal>integration-test</goal>
++								</goals>
++							</execution>
++							<execution>
++								<id>verify</id>
++								<goals>
++									<goal>verify</goal>
++								</goals>
++							</execution>
++						</executions>
++					</plugin>
++				</plugins>
++			</build>
++		</profile>
++		<profile>
++			<id>reduced-tests</id>
++			<activation>
++				<property>
++					<name>reduced-tests</name>
++					<value>true</value>
++				</property>
++			</activation>
++			<build>
++				<plugins>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-surefire-plugin</artifactId>
++						<version>2.19</version>
++						<configuration>
++							<systemPropertyVariables>
++								<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++								<odf.build.project.name>${project.name}</odf.build.project.name>
++							</systemPropertyVariables>
++							<dependenciesToScan>
++								<dependency>org.apache.atlas.odf:odf-core</dependency>
++							</dependenciesToScan>
++							<excludes>
++								<exclude>**/KafkaQueueManagerTest.java</exclude>
++								<exclude>**/ShutdownTest.java</exclude>
++								<exclude>**/MultiPartitionConsumerTest.java</exclude>
++								<exclude>**/integrationtest/**/SparkDiscoveryService*</exclude>
++								<exclude>**/integrationtest/**/AnalysisManagerTest.java</exclude>
++								<exclude>**/configuration/**</exclude>
++							</excludes>
++						</configuration>
++					</plugin>
++				</plugins>
++			</build>
++		</profile>
++	</profiles>
++
++</project>
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java
+new file mode 100755
+index 0000000..c9c95cc
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java
+@@ -0,0 +1,545 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.Iterator;
++import java.util.LinkedHashMap;
++import java.util.List;
++import java.util.Map;
++import java.util.Map.Entry;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.TimeUnit;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.apache.atlas.odf.api.engine.BrokerNode;
++import org.apache.atlas.odf.api.engine.KafkaBrokerPartitionMessageCountInfo;
++import org.apache.atlas.odf.api.engine.KafkaPartitionInfo;
++import org.apache.kafka.clients.CommonClientConfigs;
++import org.apache.kafka.clients.consumer.ConsumerConfig;
++import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
++import org.apache.kafka.clients.consumer.ConsumerRecord;
++import org.apache.kafka.clients.consumer.ConsumerRecords;
++import org.apache.kafka.clients.consumer.KafkaConsumer;
++import org.apache.kafka.common.Node;
++import org.apache.kafka.common.TopicPartition;
++import org.apache.kafka.common.protocol.SecurityProtocol;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
++import org.apache.atlas.odf.json.JSONUtils;
++
++import kafka.admin.AdminClient;
++import kafka.admin.AdminClient.ConsumerSummary;
++import kafka.api.FetchRequest;
++import kafka.api.FetchRequestBuilder;
++import kafka.api.GroupCoordinatorRequest;
++import kafka.api.GroupCoordinatorResponse;
++import kafka.api.OffsetRequest;
++import kafka.cluster.Broker;
++import kafka.cluster.BrokerEndPoint;
++import kafka.cluster.EndPoint;
++import kafka.common.ErrorMapping;
++import kafka.common.OffsetAndMetadata;
++import kafka.common.OffsetMetadata;
++import kafka.common.OffsetMetadataAndError;
++import kafka.common.TopicAndPartition;
++import kafka.coordinator.GroupOverview;
++import kafka.javaapi.FetchResponse;
++import kafka.javaapi.OffsetCommitRequest;
++import kafka.javaapi.OffsetCommitResponse;
++import kafka.javaapi.OffsetFetchRequest;
++import kafka.javaapi.OffsetFetchResponse;
++import kafka.javaapi.PartitionMetadata;
++import kafka.javaapi.TopicMetadata;
++import kafka.javaapi.TopicMetadataRequest;
++import kafka.javaapi.consumer.SimpleConsumer;
++import kafka.message.MessageAndOffset;
++import kafka.network.BlockingChannel;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++import scala.collection.JavaConversions;
++import scala.collection.Seq;
++
++public class KafkaMonitor {
++	private final static String CLIENT_ID = "odfMonitorClient";
++
++	private Logger logger = Logger.getLogger(KafkaMonitor.class.getName());
++
++	//this only works for consumer groups managed by the Kafka coordinator (unlike with Kafka < 0.9, where consumers were managed by Zookeeper)
++	public List<String> getConsumerGroups(String zookeeperHost, String topic) {
++		List<String> result = new ArrayList<String>();
++		try {
++			List<String> brokers = getBrokers(zookeeperHost);
++			StringBuilder brokersParam = new StringBuilder();
++			final Iterator<String> iterator = brokers.iterator();
++			while (iterator.hasNext()) {
++				brokersParam.append(iterator.next());
++				if (iterator.hasNext()) {
++					brokersParam.append(";");
++				}
++			}
++			Properties props = new Properties();
++			props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokersParam.toString());
++			final AdminClient client = AdminClient.create(props);
++			final Map<Node, scala.collection.immutable.List<GroupOverview>> javaMap = JavaConversions.mapAsJavaMap(client.listAllConsumerGroups());
++			for (Entry<Node, scala.collection.immutable.List<GroupOverview>> entry : javaMap.entrySet()) {
++				for (GroupOverview group : JavaConversions.seqAsJavaList(entry.getValue())) {
++					for (ConsumerSummary summary : JavaConversions.seqAsJavaList(client.describeConsumerGroup(group.groupId()))) {
++						for (TopicPartition part : JavaConversions.seqAsJavaList(summary.assignment())) {
++							if (part.topic().equals(topic) && !result.contains(group.groupId())) {
++								result.add(group.groupId());
++								break;
++							}
++						}
++					}
++				}
++			}
++		} catch (Exception ex) {
++			logger.log(Level.WARNING, "An error occurred retrieving the consumer groups", ex);
++			ex.printStackTrace();
++		}
++		return result;
++	}
++
++	private ZkUtils getZkUtils(String zookeeperHost, ZkClient zkClient) {
++		return new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false);
++	}
++
++	private ZkClient getZkClient(String zookeeperHost) {
++		return new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
++	}
++
++	public boolean setOffset(String zookeeperHost, String consumerGroup, String topic, int partition, long offset) {
++		logger.info("set offset for " + consumerGroup + " " + offset);
++		long now = System.currentTimeMillis();
++		Map<TopicAndPartition, OffsetAndMetadata> offsets = new LinkedHashMap<TopicAndPartition, OffsetAndMetadata>();
++		final TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
++		offsets.put(topicAndPartition, new OffsetAndMetadata(new OffsetMetadata(offset, "Manually set offset"), now, -1));
++		int correlationId = 0;
++		OffsetCommitRequest req = new OffsetCommitRequest(consumerGroup, offsets, correlationId++, CLIENT_ID, (short) 1);
++		final BlockingChannel channel = getOffsetManagerChannel(zookeeperHost, consumerGroup);
++		channel.send(req.underlying());
++		OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().payload());
++		if (commitResponse.hasError()) {
++			logger.warning("Could not commit offset! " + topic + ":" + partition + "-" + offset + " error: " + commitResponse.errorCode(topicAndPartition));
++			channel.disconnect();
++			return false;
++		} else {
++			logger.info("offset committed successfully");
++			channel.disconnect();
++			return true;
++		}
++	}
++
++	public List<String> getBrokers(String zookeeperHost) {
++		List<String> result = new ArrayList<String>();
++		ZkClient zkClient = getZkClient(zookeeperHost);
++		List<Broker> brokerList = JavaConversions.seqAsJavaList(getZkUtils(zookeeperHost, zkClient).getAllBrokersInCluster());
++		Iterator<Broker> brokerIterator = brokerList.iterator();
++		while (brokerIterator.hasNext()) {
++			for (Entry<SecurityProtocol, EndPoint> entry : JavaConversions.mapAsJavaMap(brokerIterator.next().endPoints()).entrySet()) {
++				String connectionString = entry.getValue().connectionString();
++				//remove protocol from string
++				connectionString = connectionString.split("://")[1];
++				result.add(connectionString);
++			}
++		}
++		zkClient.close();
++		return result;
++	}
++
++	public PartitionOffsetInfo getOffsetsOfLastMessagesForTopic(String zookeeperHost, String topic, int partition) {
++		List<String> kafkaBrokers = getBrokers(zookeeperHost);
++		return getOffsetsOfLastMessagesForTopic(kafkaBrokers, topic, partition);
++	}
++
++	public PartitionOffsetInfo getOffsetsOfLastMessagesForTopic(final List<String> kafkaBrokers, final String topic, final int partition) {
++		logger.entering(this.getClass().getName(), "getOffsetsOfLastMessagesForTopic");
++
++		final PartitionOffsetInfo info = new PartitionOffsetInfo();
++		info.setOffset(-1L);
++		info.setPartitionId(partition);
++
++		final CountDownLatch subscribeAndPollLatch = new CountDownLatch(2);
++
++		final Thread consumerThread = new Thread(new Runnable() {
++			@Override
++			public void run() {
++				Properties kafkaConsumerProps = getKafkaConsumerProps(kafkaBrokers);
++				final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(kafkaConsumerProps);
++				final TopicPartition topicPartition = new TopicPartition(topic, partition);
++				consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() {
++
++					@Override
++					public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
++						// TODO Auto-generated method stub
++
++					}
++
++					@Override
++					public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
++						subscribeAndPollLatch.countDown();
++					}
++				});
++				logger.info("poll records from kafka for offset retrieval");
++
++				final ConsumerRecords<String, String> poll = consumer.poll(500);
++				List<ConsumerRecord<String, String>> polledRecords = poll.records(topicPartition);
++				logger.info("polled records: " + poll.count());
++				if (!polledRecords.isEmpty()) {
++					ConsumerRecord<String, String> record = polledRecords.get(polledRecords.size() - 1);
++					info.setMessage(record.value());
++					info.setOffset(record.offset());
++					info.setPartitionId(partition);
++					logger.info("polled last offset: " + record.offset());
++				}
++				subscribeAndPollLatch.countDown();
++				consumer.close();
++			}
++		});
++		logger.info("start retrieval of offset");
++		consumerThread.start();
++
++		try {
++			boolean result = subscribeAndPollLatch.await(5000, TimeUnit.MILLISECONDS);
++			if (result) {
++				logger.info("Subscribed and retrieved offset on time: " + JSONUtils.toJSON(info));
++			} else {
++				logger.warning("Could not subscribe and retrieve offset on time " + JSONUtils.toJSON(info));
++				consumerThread.interrupt();
++			}
++		} catch (InterruptedException e) {
++			logger.log(Level.WARNING, "An error occurred retrieving the last offset", e);
++		} catch (JSONException e) {
++			logger.log(Level.WARNING, "An error occurred retrieving the last offset", e);
++		}
++
++		return info;
++	}
++
++	protected Properties getKafkaConsumerProps(List<String> kafkaBrokers) {
++		Properties kafkaConsumerProps = new Properties();
++		kafkaConsumerProps.put("group.id", "OffsetRetrieverConsumer" + UUID.randomUUID().toString());
++		kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
++		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++
++		StringBuilder brokers = new StringBuilder();
++		final Iterator<String> iterator = kafkaBrokers.iterator();
++		while (iterator.hasNext()) {
++			brokers.append(iterator.next());
++			if (iterator.hasNext()) {
++				brokers.append(",");
++			}
++		}
++		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers.toString());
++		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
++		return kafkaConsumerProps;
++	}
++
++	public List<KafkaBrokerPartitionMessageCountInfo> getMessageCountForTopic(String zookeeperHost, String topic) {
++		logger.entering(this.getClass().getName(), "getMessageCountForTopic");
++		List<KafkaBrokerPartitionMessageCountInfo> result = new ArrayList<KafkaBrokerPartitionMessageCountInfo>();
++
++		List<Integer> partitions = getPartitionIdsForTopic(zookeeperHost, topic);
++
++		List<String> kafkaBrokers = getBrokers(zookeeperHost);
++		for (int cnt = 0; cnt < kafkaBrokers.size(); cnt++) {
++			String broker = kafkaBrokers.get(cnt);
++			logger.info("getMessageCountForTopic from broker: " + broker);
++			KafkaBrokerPartitionMessageCountInfo container = new KafkaBrokerPartitionMessageCountInfo();
++			container.setBroker(broker);
++
++			String[] splitBroker = broker.split(":");
++			String host = splitBroker[0];
++			String port = splitBroker[1];
++			SimpleConsumer consumer = new SimpleConsumer(host, Integer.valueOf(port), 100000, 64 * 1024, "leaderLookup");
++			Map<Integer, Long> partitionCountMap = new HashMap<Integer, Long>();
++
++			for (Integer partition : partitions) {
++				logger.info("broker: " + broker + ", partition " + partition);
++				partitionCountMap.put(partition, null);
++				FetchRequest req = new FetchRequestBuilder().clientId(CLIENT_ID).addFetch(topic, partition, 0, 100000).build();
++				FetchResponse fetchResponse = consumer.fetch(req);
++
++				if (fetchResponse.hasError()) {
++					//in case of a broker error, do nothing. The broker has no information about the partition so we continue with the next one.
++					if (fetchResponse.errorCode(topic, partition) == ErrorMapping.NotLeaderForPartitionCode()) {
++						logger.info("broker " + broker + " is not leader for partition " + partition + ", cannot retrieve MessageCountForTopic");
++					} else {
++						logger.warning("broker: " + broker + ", partition " + partition + " has error: " + fetchResponse.errorCode(topic, partition));
++					}
++					continue;
++				}
++
++				long numRead = 0;
++				long readOffset = numRead;
++
++				for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
++					long currentOffset = messageAndOffset.offset();
++					if (currentOffset < readOffset) {
++						logger.info("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
++						continue;
++					}
++					readOffset = messageAndOffset.nextOffset();
++					numRead++;
++				}
++
++				logger.info("broker: " + broker + ", partition " + partition + " total messages: " + numRead);
++				partitionCountMap.put(partition, numRead);
++			}
++			consumer.close();
++			container.setPartitionMsgCountMap(partitionCountMap);
++			result.add(container);
++		}
++
++		return result;
++	}
++
++	/**
++	 * @param group
++	 * @param topic
++	 * @return a list of partitions and their offsets. If no offset is found, it is returned as -1
++	 */
++	public List<PartitionOffsetInfo> getOffsetsForTopic(String zookeeperHost, String group, String topic) {
++		BlockingChannel channel = getOffsetManagerChannel(zookeeperHost, group);
++
++		List<Integer> partitionIds = getPartitionIdsForTopic(zookeeperHost, topic);
++		List<TopicAndPartition> partitions = new ArrayList<TopicAndPartition>();
++		int correlationId = 0;
++		for (Integer id : partitionIds) {
++			TopicAndPartition testPartition0 = new TopicAndPartition(topic, id);
++			partitions.add(testPartition0);
++		}
++
++		OffsetFetchRequest fetchRequest = new OffsetFetchRequest(group, partitions, (short) 1 /* version */, // version 1 and above fetch from Kafka, version 0 fetches from ZooKeeper
++				correlationId++, CLIENT_ID);
++
++		List<PartitionOffsetInfo> offsetResult = new ArrayList<PartitionOffsetInfo>();
++		int retryCount = 0;
++		//it is possible that a ConsumerCoordinator is not available yet; if this is the case, we need to wait and try again.
++		boolean done = false;
++		while (retryCount < 5 && !done) {
++			offsetResult = new ArrayList<PartitionOffsetInfo>();
++			retryCount++;
++			channel.send(fetchRequest.underlying());
++			OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
++
++			boolean errorFound = false;
++			for (TopicAndPartition part : partitions) {
++				if (part.topic().equals(topic)) {
++					PartitionOffsetInfo offsetInfo = new PartitionOffsetInfo();
++					offsetInfo.setPartitionId(part.partition());
++					OffsetMetadataAndError result = fetchResponse.offsets().get(part);
++					short offsetFetchErrorCode = result.error();
++					if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
++						channel.disconnect();
++						String msg = "Offset could not be fetched, the used broker is not the coordinator for this consumer";
++						offsetInfo.setMessage(msg);
++						logger.warning(msg);
++						errorFound = true;
++						break;
++					} else if (offsetFetchErrorCode == ErrorMapping.OffsetsLoadInProgressCode()) {
++						logger.warning("Offset could not be fetched at this point, the offsets are not available yet");
++						try {
++							Thread.sleep(2000);
++						} catch (InterruptedException e) {
++							e.printStackTrace();
++						}
++						//Offsets are not available yet. Wait and try again
++						errorFound = true;
++						break;
++					} else if (result.error() != ErrorMapping.NoError()) {
++						String msg = MessageFormat.format("Offset could not be fetched at this point, an unknown error occurred ( {0} )", result.error());
++						offsetInfo.setMessage(msg);
++						logger.warning(msg);
++					} else {
++						long offset = result.offset();
++						offsetInfo.setOffset(offset);
++					}
++
++					offsetResult.add(offsetInfo);
++				}
++			}
++			if (!errorFound) {
++				done = true;
++			}
++		}
++
++		if (channel.isConnected()) {
++			channel.disconnect();
++		}
++		return offsetResult;
++	}
++
++	public List<TopicMetadata> getMetadataForTopic(String zookeeperHost, String kafkaTopic) {
++		//connecting to a single broker should be enough because every single broker knows everything we need
++		for (String brokerHost : getBrokers(zookeeperHost)) {
++			brokerHost = brokerHost.replace("PLAINTEXT://", "");
++			String[] splitBroker = brokerHost.split(":");
++			String ip = splitBroker[0];
++			String port = splitBroker[1];
++
++			//it is possible that a ConsumerCoordinator is not available yet; if this is the case, we need to wait and try again.
++			SimpleConsumer consumer = null;
++			try {
++				consumer = new SimpleConsumer(ip, Integer.valueOf(port), 100000, 64 * 1024, "leaderLookup");
++				int retryCount = 0;
++				boolean done = false;
++				while (retryCount < 5 && !done) {
++					retryCount++;
++
++					List<String> topics = Collections.singletonList(kafkaTopic);
++					TopicMetadataRequest req = new TopicMetadataRequest(topics);
++					kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
++					List<TopicMetadata> metaData = resp.topicsMetadata();
++
++					boolean errorFound = false;
++					for (TopicMetadata item : metaData) {
++						if (item.topic().equals(kafkaTopic)) {
++							if (item.errorCode() == ErrorMapping.LeaderNotAvailableCode()) {
++								//wait and try again
++								errorFound = true;
++								try {
++									Thread.sleep(2000);
++								} catch (InterruptedException e) {
++									e.printStackTrace();
++								}
++								break;
++							}
++							return metaData;
++						}
++					}
++
++					if (!errorFound) {
++						done = true;
++					}
++				}
++			} finally {
++				if (consumer != null) {
++					consumer.close();
++				}
++			}
++		}
++		return null;
++	}
++
++	public List<Integer> getPartitionsForTopic(String zookeeperHost, String topic) {
++		ZkClient zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
++		Map<String, Seq<Object>> partitions = JavaConversions
++				.mapAsJavaMap(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false).getPartitionsForTopics(JavaConversions.asScalaBuffer(Arrays.asList(topic)).toList()));
++		List<Object> partitionObjList = JavaConversions.seqAsJavaList(partitions.entrySet().iterator().next().getValue());
++		List<Integer> partitionsList = new ArrayList<Integer>();
++		for (Object partObj : partitionObjList) {
++			partitionsList.add((Integer) partObj);
++		}
++		zkClient.close();
++		return partitionsList;
++	}
++
++	public List<KafkaPartitionInfo> getPartitionInfoForTopic(String zookeeperHost, String topic) {
++		List<TopicMetadata> topicInfos = getMetadataForTopic(zookeeperHost, topic);
++		List<KafkaPartitionInfo> partitionInfoList = new ArrayList<KafkaPartitionInfo>();
++		for (TopicMetadata topicInfo : topicInfos) {
++			for (PartitionMetadata part : topicInfo.partitionsMetadata()) {
++				KafkaPartitionInfo info = new KafkaPartitionInfo();
++				info.setPartitionId(part.partitionId());
++
++				List<BrokerNode> partitionNodes = new ArrayList<BrokerNode>();
++				for (BrokerEndPoint brokerPoint : part.isr()) {
++					BrokerNode node = new BrokerNode();
++					node.setHost(brokerPoint.connectionString());
++					node.setLeader(brokerPoint.connectionString().equals(part.leader().connectionString()));
++					partitionNodes.add(node);
++				}
++				info.setNodes(partitionNodes);
++				partitionInfoList.add(info);
++			}
++		}
++		//partitionInformation is collected, end loop and return
++		return partitionInfoList;
++	}
++
++	public List<Integer> getPartitionIdsForTopic(String zookeeperHost, String topic) {
++		List<TopicMetadata> metadata = getMetadataForTopic(zookeeperHost, topic);
++
++		List<Integer> partitionsList = new ArrayList<Integer>();
++		if (metadata != null && metadata.size() > 0) {
++			for (PartitionMetadata partData : metadata.get(0).partitionsMetadata()) {
++				partitionsList.add(partData.partitionId());
++			}
++		}
++
++		return partitionsList;
++	}
++
++	private BlockingChannel getOffsetManagerChannel(String zookeeperHost, String group) {
++		int correlationId = 0;
++		for (String broker : getBrokers(zookeeperHost)) {
++			String[] splitBroker = broker.split(":");
++			String ip = splitBroker[0];
++			String port = splitBroker[1];
++
++			int retryCount = 0;
++			//it is possible that a ConsumerCoordinator is not available yet; if this is the case, we need to wait and try again.
++			while (retryCount < 5) {
++				retryCount++;
++
++				BlockingChannel channel = new BlockingChannel(ip, Integer.valueOf(port), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(),
++						5000 /* read timeout in millis */);
++				channel.connect();
++				channel.send(new GroupCoordinatorRequest(group, OffsetRequest.CurrentVersion(), correlationId++, CLIENT_ID));
++				GroupCoordinatorResponse metadataResponse = GroupCoordinatorResponse.readFrom(channel.receive().payload());
++
++				if (metadataResponse.errorCode() == ErrorMapping.NoError()) {
++					BrokerEndPoint endPoint = metadataResponse.coordinatorOpt().get();
++					// reconnect if the coordinator endpoint differs from the broker we are currently connected to
++					if (!endPoint.host().equals(ip) || endPoint.port() != Integer.parseInt(port)) {
++						channel.disconnect();
++						channel = new BlockingChannel(endPoint.host(), endPoint.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
++						channel.connect();
++					}
++					return channel;
++				} else if (metadataResponse.errorCode() == ErrorMapping.ConsumerCoordinatorNotAvailableCode()
++						|| metadataResponse.errorCode() == ErrorMapping.OffsetsLoadInProgressCode()) {
++					//wait and try again
++					try {
++						Thread.sleep(2000);
++					} catch (InterruptedException e) {
++						e.printStackTrace();
++					}
++				} else {
++					//unknown error, continue with next broker
++					break;
++				}
++			}
++		}
++		throw new RuntimeException("Kafka Consumer Broker not available!");
++	}
++}
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java
+new file mode 100755
+index 0000000..33c4ae0
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java
+@@ -0,0 +1,105 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.util.Iterator;
++import java.util.Properties;
++import java.util.concurrent.ExecutionException;
++import java.util.concurrent.TimeUnit;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.kafka.clients.producer.Callback;
++import org.apache.kafka.clients.producer.KafkaProducer;
++import org.apache.kafka.clients.producer.ProducerConfig;
++import org.apache.kafka.clients.producer.ProducerRecord;
++import org.apache.kafka.common.errors.TimeoutException;
++
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.MessageEncryption;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++
++public class KafkaProducerManager {
++
++	private final static Logger logger = Logger.getLogger(KafkaProducerManager.class.getName());
++	private static KafkaProducer<String, String> producer;
++
++	protected Properties getKafkaProducerConfig() {
++		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++		ODFInternalFactory f = new ODFInternalFactory();
++		Properties props = odfConfig.getKafkaProducerProperties();
++		String zookeeperConnect = f.create(Environment.class).getZookeeperConnectString();
++		final Iterator<String> brokers = f.create(KafkaMonitor.class).getBrokers(zookeeperConnect).iterator();
++		StringBuilder brokersString = new StringBuilder();
++		while (brokers.hasNext()) {
++			brokersString.append(brokers.next());
++			if (brokers.hasNext()) {
++				brokersString.append(",");
++			}
++		}
++		logger.info("Sending messages to brokers: " + brokersString.toString());
++		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++		props.put(ProducerConfig.CLIENT_ID_CONFIG, "ODF_MESSAGE_PRODUCER");
++		return props;
++	}
++
++	private KafkaProducer<String, String> getProducer() {
++		if (producer == null) {
++			producer = new KafkaProducer<String, String>(getKafkaProducerConfig());
++		}
++		return producer;
++	}
++
++	public void sendMsg(String topicName, String key, String value) {
++		MessageEncryption msgEncryption = new ODFInternalFactory().create(MessageEncryption.class);
++		value = msgEncryption.encrypt(value);
++		sendMsg(topicName, key, value, null);
++	}
++
++	public void sendMsg(final String topicName, final String key, final String value, final Callback callback) {
++		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, key, value);
++		try {
++			int retryCount = 0;
++			boolean msgSent = false;
++			while (retryCount < 5 && !msgSent) {
++				try {
++					getProducer().send(producerRecord, callback).get(4000, TimeUnit.MILLISECONDS);
++					msgSent = true;
++				} catch (ExecutionException ex) {
++					if (ex.getCause() instanceof TimeoutException) {
++						logger.warning("Message could not be sent within 4000 ms");
++						retryCount++;
++					} else {
++						throw ex;
++					}
++				}
++			}
++			if (retryCount == 5) {
++				logger.warning("Message could not be sent after 5 retries!");
++				logger.fine("topic: " + topicName + " key " + key + " msg " + value);
++			}
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "Exception while sending message", exc);
++			if (producer != null) {
++				producer.close();
++			}
++			producer = null;
++			throw new RuntimeException(exc);
++		}
++	}
++
++}
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java
+new file mode 100755
+index 0000000..d0cf704
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java
+@@ -0,0 +1,233 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.text.MessageFormat;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.List;
++import java.util.Properties;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.exception.ZkTimeoutException;
++import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
++import org.apache.kafka.clients.consumer.ConsumerRecord;
++import org.apache.kafka.clients.consumer.ConsumerRecords;
++import org.apache.kafka.clients.consumer.KafkaConsumer;
++import org.apache.kafka.common.TopicPartition;
++import org.apache.kafka.common.errors.WakeupException;
++
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.messaging.MessageEncryption;
++
++import kafka.consumer.ConsumerTimeoutException;
++
++public class KafkaQueueConsumer implements ODFRunnable {
++	private Logger logger = Logger.getLogger(KafkaQueueConsumer.class.getName());
++	final static int POLLING_DURATION_MS = 100;
++	public static final int MAX_PROCESSING_EXCEPTIONS = 3;
++	public final static int MAX_CONSUMPTION_EXCEPTIONS = 5;
++	
++	public static interface ConsumptionCallback {
++		boolean stopConsumption();
++	}
++
++	private boolean ready = false;
++
++	private String topic;
++	private KafkaConsumer<String, String> kafkaConsumer;
++	private Properties config;
++	private boolean isShutdown = false;
++	private ExecutorService executorService;
++	private QueueMessageProcessor requestConsumer;
++	private int consumptionExceptionCount = 0;
++	private ConsumptionCallback consumptionCallback;
++
++	public KafkaQueueConsumer(String topicName, Properties config, QueueMessageProcessor requestConsumer) {
++		this(topicName, config, requestConsumer, null);
++	}
++	
++	public KafkaQueueConsumer(String topicName, Properties config, QueueMessageProcessor requestConsumer, ConsumptionCallback consumptionCallback) {
++		this.topic = topicName;
++		this.config = config;
++		this.requestConsumer = requestConsumer;
++		this.consumptionCallback = consumptionCallback;
++		if (this.consumptionCallback == null) {
++			this.consumptionCallback = new ConsumptionCallback() {
++
++				@Override
++				public boolean stopConsumption() {
++					// default: never stop
++					return false;
++				}
++				
++			};
++		}
++	}
++
++	public void run() {
++		final String groupId = this.config.getProperty("group.id");
++		while (consumptionExceptionCount < MAX_CONSUMPTION_EXCEPTIONS && !isShutdown) {
++			try {
++				logger.info("Starting consumption for " + groupId);
++				startConsumption();
++			} catch (RuntimeException ex) {
++				if (ex.getCause() instanceof WakeupException) {
++					isShutdown = true;
++				} else {
++					consumptionExceptionCount++;
++					logger.log(Level.WARNING, "Caught exception in KafkaQueueConsumer " + groupId + ", restarting consumption!", ex);
++				}
++				if (this.kafkaConsumer != null) {
++					this.kafkaConsumer.close();
++					this.kafkaConsumer = null;
++				}
++			} catch (Exception e) {
++				consumptionExceptionCount++;
++				logger.log(Level.WARNING, "Caught exception in KafkaQueueConsumer " + groupId + ", restarting consumption!", e);
++				if (this.kafkaConsumer != null) {
++					this.kafkaConsumer.close();
++					this.kafkaConsumer = null;
++				}
++			}
++		}
++		logger.info("Stopping consumption for " + groupId);
++		this.ready = false;
++		this.cancel();
++	}
++
++	private void startConsumption() {
++		if (this.consumptionCallback.stopConsumption()) {
++			return;
++		}
++		Exception caughtException = null;
++		final String logPrefix = this + " consumer: [" + this.requestConsumer.getClass().getSimpleName() + "], on " + topic + ": ";
++		try {
++			if (this.kafkaConsumer == null) {
++				logger.fine(logPrefix + " create new consumer for topic " + topic);
++				try {
++					this.kafkaConsumer = new KafkaConsumer<String, String>(config);
++					kafkaConsumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() {
++
++						@Override
++						public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
++							logger.fine(logPrefix + " partitions revoked " + topic + " new partitions: " + partitions.size());
++						}
++
++						@Override
++						public void onPartitionsAssigned(Collection<TopicPartition> partitions) {						
++							logger.finer(logPrefix + " partitions assigned " + topic + " , new partitions: " + partitions.size());
++							logger.info(logPrefix + "consumer is ready with " + partitions.size() + " partitions assigned");
++							ready = true;
++						}
++					});
++				} catch (ZkTimeoutException zkte) {
++					String zkHosts = config.getProperty("zookeeper.connect");
++					logger.log(Level.SEVERE, logPrefix + " Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
++					throw zkte;
++				}
++			}
++			logger.log(Level.INFO, logPrefix + " Consumer ''{1}'' is now listening on ODF queue ''{0}'' with configuration {2}", new Object[] { topic, requestConsumer, config });
++			MessageEncryption msgEncryption = new ODFInternalFactory().create(MessageEncryption.class);
++			while (!Thread.interrupted() && !isShutdown && kafkaConsumer != null) {
++				if (this.consumptionCallback.stopConsumption()) {
++					isShutdown = true;
++					break;
++				}
++				ConsumerRecords<String, String> records = kafkaConsumer.poll(POLLING_DURATION_MS);
++				kafkaConsumer.commitSync(); // commit offset immediately to avoid timeouts for long running processors
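++				// Note: committing before processing gives at-most-once delivery: a message that still
++				// fails after MAX_PROCESSING_EXCEPTIONS attempts is dropped and will not be re-delivered.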
++				for (TopicPartition partition : kafkaConsumer.assignment()) {
++					List<ConsumerRecord<String, String>> polledRecords = records.records(partition);
++					if (!polledRecords.isEmpty()) {
++						logger.fine(polledRecords.get(0).value() + " offset: " + polledRecords.get(0).offset());
++					}
++
++					for (int no = 0; no < polledRecords.size(); no++) {
++						ConsumerRecord<String, String> record = polledRecords.get(no);
++						String s = record.value();
++						logger.log(Level.FINEST, logPrefix + "Decrypting message {0}", s);
++						try {
++							s = msgEncryption.decrypt(s);
++						} catch (Exception exc) {
++							logger.log(Level.WARNING, "Message could not be decrypted, ignoring it", exc);
++							s = null;
++						}
++						if (s != null) {
++							logger.log(Level.FINEST, logPrefix + "Sending message to consumer ''{0}''", s);
++							int exceptionCount = 0;
++							boolean processedSuccessfully = false;
++							while (exceptionCount < MAX_PROCESSING_EXCEPTIONS && !processedSuccessfully) {
++								try {
++									exceptionCount++;
++									this.requestConsumer.process(executorService, s, record.partition(), record.offset());
++									processedSuccessfully = true;
++								} catch (Exception ex) {
++									logger.log(Level.WARNING, "Exception " + exceptionCount + " caught while processing message", ex);
++								}
++							}
++						}
++					}
++				}
++			}
++		} catch (ConsumerTimeoutException e) {
++			String msg = MessageFormat.format(" Caught timeout on queue ''{0}''", topic);
++			logger.log(Level.WARNING, logPrefix + msg, e);
++			caughtException = e;
++		} catch (Exception exc) {
++			String msg = MessageFormat.format(" Caught exception on queue ''{0}''", topic);
++			logger.log(Level.WARNING, logPrefix + msg, exc);
++			caughtException = exc;
++		} finally {
++			if (kafkaConsumer != null) {
++				logger.log(Level.FINE, logPrefix + "Closing consumer on topic ''{0}''", topic);
++				kafkaConsumer.close();
++				logger.log(Level.FINE, logPrefix + "Closed consumer on topic ''{0}''", topic);
++				kafkaConsumer = null;
++			}
++		}
++		logger.log(Level.INFO, logPrefix + "Finished consumer on topic ''{0}''", topic);
++		if (caughtException != null) {
++			throw new RuntimeException(caughtException);
++		}
++	}
++
++	public void cancel() {
++		logger.log(Level.INFO, "Shutting down consumer on topic ''{0}''", topic);
++		if (this.kafkaConsumer != null) {
++			this.kafkaConsumer.wakeup();
++		}
++		isShutdown = true;
++	}
++
++	public boolean isShutdown() {
++		return isShutdown;
++	}
++
++	@Override
++	public void setExecutorService(ExecutorService service) {
++		this.executorService = service;
++	}
++
++	@Override
++	public boolean isReady() {
++		return ready;
++	}
++
++}
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java
+new file mode 100755
+index 0000000..e759ecc
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java
+@@ -0,0 +1,488 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.I0Itec.zkclient.exception.ZkTimeoutException;
++import org.apache.atlas.odf.api.OpenDiscoveryFramework;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.engine.KafkaGroupOffsetInfo;
++import org.apache.atlas.odf.api.engine.KafkaStatus;
++import org.apache.atlas.odf.api.engine.KafkaTopicStatus;
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
++import org.apache.atlas.odf.core.notification.NotificationListener;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.kafka.clients.consumer.ConsumerConfig;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.engine.MessagingStatus;
++import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.controlcenter.AdminMessage;
++import org.apache.atlas.odf.core.controlcenter.AdminQueueProcessor;
++import org.apache.atlas.odf.core.controlcenter.ConfigChangeQueueProcessor;
++import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore.StatusQueueProcessor;
++import org.apache.atlas.odf.core.controlcenter.DiscoveryServiceStarter;
++import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
++import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
++import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
++import org.apache.atlas.odf.core.notification.NotificationManager;
++
++import kafka.admin.AdminUtils;
++import kafka.admin.RackAwareMode;
++import kafka.common.TopicExistsException;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++
++public class KafkaQueueManager implements DiscoveryServiceQueueManager {
++
++	public static final String TOPIC_NAME_STATUS_QUEUE = "odf-status-topic";
++	public static final String TOPIC_NAME_ADMIN_QUEUE = "odf-admin-topic";
++	public static final String ADMIN_QUEUE_KEY = "odf-admin-queue-key";
++	public static final String SERVICE_TOPIC_PREFIX = "odf-topic-";
++
++	public static final RackAwareMode DEFAULT_RACK_AWARE_MODE = RackAwareMode.Disabled$.MODULE$;
++	
++	//use static UUID so that no unnecessary consumer threads are started
++	private final static String UNIQUE_SESSION_THREAD_ID = UUID.randomUUID().toString();
++
++	private final static int THREAD_STARTUP_TIMEOUT_MS = 5000;
++	
++	private static List<String> queueConsumerNames = null;
++	private static Object startLock = new Object();
++
++	private final static Logger logger = Logger.getLogger(KafkaQueueManager.class.getName());
++
++	private ThreadManager threadManager;
++	private SettingsManager odfConfig;
++	private String zookeeperConnectString;
++
++	public KafkaQueueManager() {
++		ODFInternalFactory factory = new ODFInternalFactory();
++		threadManager = factory.create(ThreadManager.class);
++		ExecutorServiceFactory esf = factory.create(ExecutorServiceFactory.class);
++		threadManager.setExecutorService(esf.createExecutorService());
++		zookeeperConnectString = factory.create(Environment.class).getZookeeperConnectString();
++		odfConfig = factory.create(SettingsManager.class);
++	}
++	
++	
++	public Properties getConsumerConfigProperties(String consumerGroupID, boolean consumeFromEnd) {
++		Properties kafkaConsumerProps = odfConfig.getKafkaConsumerProperties();
++		kafkaConsumerProps.put("group.id", consumerGroupID);
++		if (zookeeperConnectString != null) {
++			kafkaConsumerProps.put("zookeeper.connect", zookeeperConnectString);
++		}
++		if (consumeFromEnd) {
++			kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
++		} else {
++			kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
++		}
++		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());
++		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);		
++		return kafkaConsumerProps;
++	}
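++
++	// Usage sketch (illustrative; "processor" stands for any QueueMessageProcessor):
++	//   Properties props = getConsumerConfigProperties("my-group", false); // read from the beginning
++	//   new KafkaQueueConsumer(TOPIC_NAME_STATUS_QUEUE, props, processor);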
++
++	private String getBootstrapServers() {
++		final List<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnectString);
++		StringBuilder servers = new StringBuilder();
++		final Iterator<String> iterator = brokers.iterator();
++		while(iterator.hasNext()){
++			servers.append(iterator.next());
++			if(iterator.hasNext()){
++				servers.append(",");
++			}
++		}
++		return servers.toString();
++	}
++
++	protected void createTopicIfNotExists(String topicName, int partitionCount, Properties props) {
++		String zkHosts = props.getProperty("zookeeper.connect");
++		ZkClient zkClient = null;
++		try {
++			zkClient = new ZkClient(zkHosts, Integer.valueOf(props.getProperty("zookeeperSessionTimeoutMs")),
++					Integer.valueOf(props.getProperty("zookeeperConnectionTimeoutMs")), ZKStringSerializer$.MODULE$);
++		} catch (ZkTimeoutException zkte) {
++			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
++		}
++		try {
++			logger.log(Level.FINEST, "Checking if topic ''{0}'' already exists", topicName);
++			// no special per-topic config needed; the partition count is passed in and
++			// the replication factor is taken from the Kafka messaging configuration
++			try {
++				final ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts), false);
++				if (!AdminUtils.topicExists(zkUtils, topicName)) {
++					logger.log(Level.INFO, "Topic ''{0}'' does not exist, creating it", topicName);
++
++					//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
++					KafkaMessagingConfiguration kafkaConfig = ((KafkaMessagingConfiguration) odfConfig.getODFSettings().getMessagingConfiguration());
++					AdminUtils.createTopic(zkUtils, topicName, partitionCount, kafkaConfig.getKafkaBrokerTopicReplication(),
++							new Properties(), DEFAULT_RACK_AWARE_MODE);
++					logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
++					//wait before continuing to make sure the topic exists BEFORE consumers are started
++					try {
++						Thread.sleep(1500);
++					} catch (InterruptedException e) {
++						// restore the interrupt flag instead of swallowing the exception
++						Thread.currentThread().interrupt();
++					}
++				}
++			} catch (TopicExistsException ex) {
++				logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
++			}
++		} finally {
++			if (zkClient != null) {
++				zkClient.close();
++			}
++		}
++	}
++
++
++	private String getTopicName(ServiceRuntime runtime) {
++		return "odf-runtime-" + runtime.getName();
++	}
++	
++	private String getConsumerGroup(ServiceRuntime runtime) {
++		return getTopicName(runtime) + "_group";
++	}
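++
++	// Example (illustrative): for a runtime named "myRuntime" the topic is
++	// "odf-runtime-myRuntime" and the consumer group is "odf-runtime-myRuntime_group".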
++	
++	private List<ThreadStartupResult> scheduleAllRuntimeConsumers() {
++		List<ThreadStartupResult> results = new ArrayList<>();
++		for (ServiceRuntime runtime : ServiceRuntimes.getActiveRuntimes()) {
++			results.addAll(scheduleRuntimeConsumers(runtime));
++		}
++		return results;
++	}
++	
++	private List<ThreadStartupResult> scheduleRuntimeConsumers(ServiceRuntime runtime) {
++		logger.log(Level.FINER, "Create consumers on queue for runtime ''{0}'' if it doesn't already exist", runtime.getName());
++
++		String topicName = getTopicName(runtime);
++		String consumerGroupId = getConsumerGroup(runtime);
++		Properties kafkaConsumerProps = getConsumerConfigProperties(consumerGroupId, false); // read entries from beginning if consumer was never initialized 
++		String threadName = "RuntimeQueueConsumer" + topicName;
++		List<ThreadStartupResult> result = new ArrayList<ThreadStartupResult>();
++		if (threadManager.getStateOfUnmanagedThread(threadName) != ThreadStatus.ThreadState.RUNNING) {
++			createTopicIfNotExists(topicName, 1, kafkaConsumerProps);
++			ThreadStartupResult startupResult = threadManager.startUnmanagedThread(threadName, new KafkaRuntimeConsumer(runtime, topicName, kafkaConsumerProps, new DiscoveryServiceStarter()));
++			result.add(startupResult);		
++		} else {
++			result.add(new ThreadStartupResult(threadName) {
++				@Override
++				public boolean isNewThreadCreated() {
++					return false;
++				}
++
++				@Override
++				public boolean isReady() {
++					return true;
++				}
++			});
++		}
++		return result;
++	}
++
++	
++	private List<ThreadStartupResult> scheduleConsumerThreads(String topicName, int partitionCount, Properties kafkaConsumerProps, String threadName,
++			List<QueueMessageProcessor> processors) {
++		if (processors.size() != partitionCount) {
++			final String msg = "The number of processors must be equal to the partition count in order to support parallel processing";
++			logger.warning(msg);
++			throw new RuntimeException(msg);
++		}
++		createTopicIfNotExists(topicName, partitionCount, kafkaConsumerProps);
++
++		List<ThreadStartupResult> result = new ArrayList<ThreadStartupResult>();
++		for (int no = 0; no < partitionCount; no++) {
++			if (threadManager.getStateOfUnmanagedThread(threadName + "_" + no) != ThreadStatus.ThreadState.RUNNING) {
++				QueueMessageProcessor processor = processors.get(no);
++				ThreadStartupResult created = threadManager.startUnmanagedThread(threadName + "_" + no, new KafkaQueueConsumer(topicName, kafkaConsumerProps, processor));
++				if (created.isNewThreadCreated()) {
++					logger.log(Level.INFO, "Created new consumer thread on topic ''{0}'' with group ID ''{1}'', thread name: ''{2}'', properties: ''{3}''",
++							new Object[] { topicName, kafkaConsumerProps.getProperty("group.id"), threadName + "_" + no, kafkaConsumerProps.toString() });
++				} else {
++					logger.log(Level.FINE, "Consumer thread with thread name: ''{0}'' already exists, doing nothing", new Object[] { threadName + "_" + no });
++				}
++				result.add(created);
++			} else {
++				result.add(new ThreadStartupResult(threadName + "_" + no) {
++					@Override
++					public boolean isNewThreadCreated() {
++						return false;
++					}
++
++					@Override
++					public boolean isReady() {
++						return true;
++					}
++				});
++			}
++		}
++		return result;
++	}
++
++	private ThreadStartupResult scheduleConsumerThread(String topicName, Properties kafkaConsumerProps, String threadName, QueueMessageProcessor processor) {
++		return scheduleConsumerThreads(topicName, 1, kafkaConsumerProps, threadName, Arrays.asList(processor)).get(0);
++	}
++
++	@Override
++	public void enqueue(AnalysisRequestTracker tracker) {
++		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++		if (dsRequest == null) {
++			throw new RuntimeException("Tracker is finished, should not be enqueued");
++		}
++		String dsID = dsRequest.getDiscoveryServiceId();
++		dsRequest.setPutOnRequestQueue(System.currentTimeMillis());
++		ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(dsID);
++		if (runtime == null) {
++			throw new RuntimeException(MessageFormat.format("Service runtime for service ''{0}'' was not found.", dsID));
++		}
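++		// key by request id so that Kafka's default partitioner keeps all messages of one
++		// request on the same partition, preserving their order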
++		enqueueJSONMessage(getTopicName(runtime), tracker, tracker.getRequest().getId());
++	}
++
++	private void enqueueJSONMessage(String topicName, Object jsonObject, String key) {
++		String value = null;
++		try {
++			value = JSONUtils.toJSON(jsonObject);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++		new ODFInternalFactory().create(KafkaProducerManager.class).sendMsg(topicName, key, value);
++	}
++
++	List<ThreadStartupResult> scheduleStatusQueueConsumers() {
++		logger.log(Level.FINER, "Create consumers on status queue if they don't already exist");
++		List<ThreadStartupResult> results = new ArrayList<ThreadStartupResult>();
++
++		// create consumer thread for the status watcher of all trackers
++		String statusWatcherConsumerGroupID = "DSStatusWatcherConsumerGroup" + UNIQUE_SESSION_THREAD_ID; // have a new group id on each node that reads all from the beginning
++		// always read from beginning for the status queue
++		Properties statusWatcherKafkaConsumerProps = getConsumerConfigProperties(statusWatcherConsumerGroupID, false);
++		final String statusWatcherThreadName = "StatusWatcher" + TOPIC_NAME_STATUS_QUEUE; // a fixed name
++		String threadNameWithPartition = statusWatcherThreadName + "_0";
++		final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadNameWithPartition);
++		logger.fine("State of status watcher thread: " + stateOfUnmanagedThread);
++		if (stateOfUnmanagedThread != ThreadStatus.ThreadState.RUNNING) {
++			final ThreadStartupResult scheduleConsumerThread = scheduleConsumerThread(TOPIC_NAME_STATUS_QUEUE, statusWatcherKafkaConsumerProps, statusWatcherThreadName,
++					new StatusQueueProcessor());
++			results.add(scheduleConsumerThread);
++		} else {
++			results.add(new ThreadStartupResult(statusWatcherThreadName) {
++				@Override
++				public boolean isNewThreadCreated() {
++					return false;
++				}
++
++				@Override
++				public boolean isReady() {
++					return true;
++				}
++			});
++		}
++
++		return results;
++	}
++
++
++	@Override
++	public void enqueueInStatusQueue(StatusQueueEntry sqe) {
++		enqueueJSONMessage(TOPIC_NAME_STATUS_QUEUE, sqe, StatusQueueEntry.getRequestId(sqe));
++	}
++
++
++	private List<ThreadStartupResult> scheduleAdminQueueConsumers() {
++		List<ThreadStartupResult> results = new ArrayList<ThreadStartupResult>();
++		//schedule admin queue consumers
++		// consumer group so that every node receives events
++		String adminWatcherConsumerGroupID = "DSAdminQueueConsumerGroup" + UNIQUE_SESSION_THREAD_ID; // have a new group id on each node 
++		Properties adminWatcherKafkaConsumerProps = getConsumerConfigProperties(adminWatcherConsumerGroupID, true);
++		final String adminWatcherThreadName = "AdminWatcher" + TOPIC_NAME_ADMIN_QUEUE;
++		String threadNameWithPartition = adminWatcherThreadName + "_0";
++		if (threadManager.getStateOfUnmanagedThread(threadNameWithPartition) != ThreadStatus.ThreadState.RUNNING) {
++			results.add(scheduleConsumerThread(TOPIC_NAME_ADMIN_QUEUE, adminWatcherKafkaConsumerProps, adminWatcherThreadName, new AdminQueueProcessor()));
++			// consumer group so only one node receives events
++			String distributedAdminConsumerGroup = "DSAdminQueueConsumerGroupCommon";
++			Properties kafkaProps = getConsumerConfigProperties(distributedAdminConsumerGroup, true);
++			final String threadName = "DistributedAdminWatcher";
++			results.add(scheduleConsumerThread(TOPIC_NAME_ADMIN_QUEUE, kafkaProps, threadName, new ConfigChangeQueueProcessor()));
++		} else {
++			results.add(new ThreadStartupResult(adminWatcherThreadName) {
++				@Override
++				public boolean isNewThreadCreated() {
++					return false;
++				}
++
++				@Override
++				public boolean isReady() {
++					return true;
++				}
++			});
++		}
++		return results;
++	}
++
++	@Override
++	public void enqueueInAdminQueue(AdminMessage message) {
++		enqueueJSONMessage(TOPIC_NAME_ADMIN_QUEUE, message, ADMIN_QUEUE_KEY);
++	}
++
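++	// start() is effectively idempotent per JVM: the static queueConsumerNames guard ensures
++	// the consumer threads are scheduled only once until stop() resets it.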
++	@Override
++	public void start() throws TimeoutException {
++		synchronized (startLock) {
++			if (queueConsumerNames == null) {
++				List<ThreadStartupResult> results = new ArrayList<>();
++				results.addAll(scheduleStatusQueueConsumers());
++				results.addAll(scheduleAdminQueueConsumers());
++				results.addAll(scheduleAllRuntimeConsumers());
++				results.addAll(scheduleNotificationListenerThreads());
++				List<String> consumerNames = new ArrayList<>();
++				for (ThreadStartupResult tsr : results) {
++					consumerNames.add(tsr.getThreadId());
++				}
++				queueConsumerNames = consumerNames;
++				this.threadManager.waitForThreadsToBeReady(THREAD_STARTUP_TIMEOUT_MS * results.size(), results);
++				logger.info("KafkaQueueManager successfully initialized");
++			}
++		}
++	}
++	
++	public void stop() {
++		synchronized (startLock) {
++			if (queueConsumerNames != null) {
++				threadManager.shutdownThreads(queueConsumerNames);
++				queueConsumerNames = null;
++			}
++		}
++	}
++
++	@Override
++	public MessagingStatus getMessagingStatus() {
++		KafkaStatus status = new KafkaStatus();
++		KafkaMonitor monitor = new ODFInternalFactory().create(KafkaMonitor.class);
++		status.setBrokers(monitor.getBrokers(zookeeperConnectString));
++
++		List<String> topics = new ArrayList<String>(Arrays.asList(KafkaQueueManager.TOPIC_NAME_ADMIN_QUEUE, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE));
++		for (DiscoveryServiceProperties info : new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties()) {
++			topics.add(KafkaQueueManager.SERVICE_TOPIC_PREFIX + info.getId());
++		}
++
++		List<KafkaTopicStatus> topicStatusList = new ArrayList<KafkaTopicStatus>();
++		for (String topic : topics) {
++			KafkaTopicStatus topicStatus = getTopicStatus(topic, monitor);
++			topicStatusList.add(topicStatus);
++		}
++		status.setTopicStatus(topicStatusList);
++		return status;
++	}
++
++	private KafkaTopicStatus getTopicStatus(String topic, KafkaMonitor monitor) {
++		KafkaTopicStatus topicStatus = new KafkaTopicStatus();
++		topicStatus.setTopic(topic);
++		topicStatus.setBrokerPartitionMessageInfo(monitor.getMessageCountForTopic(zookeeperConnectString, topic));
++
++		List<KafkaGroupOffsetInfo> offsetInfoList = new ArrayList<KafkaGroupOffsetInfo>();
++		List<String> consumerGroupsFromZookeeper = monitor.getConsumerGroups(zookeeperConnectString, topic);
++		for (String group : consumerGroupsFromZookeeper) {
++			KafkaGroupOffsetInfo offsetInfoContainer = new KafkaGroupOffsetInfo();
++			offsetInfoContainer.setGroupId(group);
++			List<PartitionOffsetInfo> offsetsForTopic = monitor.getOffsetsForTopic(zookeeperConnectString, group, topic);
++			for (PartitionOffsetInfo info : offsetsForTopic) {
++				// to reduce clutter, only if at least 1 partition has an offset > -1 (== any offset) for this consumer group, 
++				// it will be included in the result
++				if (info.getOffset() > -1) {
++					offsetInfoContainer.setOffsets(offsetsForTopic);
++					offsetInfoList.add(offsetInfoContainer);
++					break;
++				}
++			}
++		}
++		topicStatus.setConsumerGroupOffsetInfo(offsetInfoList);
++
++		topicStatus.setPartitionBrokersInfo(monitor.getPartitionInfoForTopic(zookeeperConnectString, topic));
++		return topicStatus;
++	}
++
++	private List<ThreadStartupResult> scheduleNotificationListenerThreads() {
++		NotificationManager nm = new ODFInternalFactory().create(NotificationManager.class);
++		List<NotificationListener> listeners = nm.getListeners();
++		List<ThreadStartupResult> result = new ArrayList<>();
++		if (listeners == null) {
++			return result;
++		}
++		final OpenDiscoveryFramework odf = new ODFFactory().create();
++		for (final NotificationListener listener : listeners) {
++			String topicName = listener.getTopicName();
++			String consumerGroupId = "ODFNotificationGroup" + topicName;
++			Properties kafkaConsumerProps = getConsumerConfigProperties(consumerGroupId, true);  
++			String threadName = "NotificationListenerThread" + topicName;
++			if (threadManager.getStateOfUnmanagedThread(threadName) != ThreadStatus.ThreadState.RUNNING) {
++				KafkaQueueConsumer consumer = new KafkaQueueConsumer(topicName, kafkaConsumerProps, new QueueMessageProcessor() {
++					
++					@Override
++					public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
++						try {
++							listener.onEvent(msg, odf);
++						} catch(Exception exc) {
++							String errorMsg = MessageFormat.format("Notification listener ''{0}'' has thrown an exception. Ignoring it.", listener.getName());
++							logger.log(Level.WARNING, errorMsg, exc);
++						}
++					}
++				});
++				ThreadStartupResult startupResult = threadManager.startUnmanagedThread(threadName, consumer);
++				result.add(startupResult);		
++			} else {
++				result.add(new ThreadStartupResult(threadName) {
++					@Override
++					public boolean isNewThreadCreated() {
++						return false;
++					}
++
++					@Override
++					public boolean isReady() {
++						return true;
++					}
++				});
++			}
++		}
++		return result;
++	}
++	
++}
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java
+new file mode 100755
+index 0000000..73d98e7
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java
+@@ -0,0 +1,104 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.util.Properties;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
++
++/**
++ * Consumer that is started for a specific service runtime. It delegates to a
++ * KafkaQueueConsumer while the runtime is available and waits otherwise.
++ */
++public class KafkaRuntimeConsumer implements ODFRunnable {
++
++	Logger logger = Logger.getLogger(KafkaRuntimeConsumer.class.getName());
++
++	private ServiceRuntime runtime;
++	private boolean isShutdown = false;
++	private ExecutorService executorService = null;
++	private KafkaQueueConsumer kafkaQueueConsumer = null;
++
++	private String topic;
++	private Properties config;
++	private QueueMessageProcessor processor;
++
++	private KafkaQueueConsumer.ConsumptionCallback callback = new KafkaQueueConsumer.ConsumptionCallback() {
++		@Override
++		public boolean stopConsumption() {
++			return isShutdown || (runtime.getWaitTimeUntilAvailable() > 0);
++		}
++	};
++
++	public KafkaRuntimeConsumer(ServiceRuntime runtime, String topicName, Properties config, QueueMessageProcessor processor) {
++		this.runtime = runtime;
++		this.processor = processor;
++		this.topic = topicName;
++		this.config = config;
++	}
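++
++	// Illustrative wiring (mirrors KafkaQueueManager.scheduleRuntimeConsumers): this runnable
++	// is typically started as an unmanaged thread, e.g.
++	//   threadManager.startUnmanagedThread("RuntimeQueueConsumer" + topicName,
++	//       new KafkaRuntimeConsumer(runtime, topicName, kafkaConsumerProps, new DiscoveryServiceStarter()));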
++
++	@Override
++	public void run() {
++		logger.log(Level.INFO, "Starting runtime consumer for topic ''{0}''", topic);
++		while (!isShutdown) {
++			long waitTime = runtime.getWaitTimeUntilAvailable();
++			if (waitTime <= 0) {
++				logger.log(Level.INFO, "Starting Kafka consumer for topic ''{0}''", topic);
++				kafkaQueueConsumer = new KafkaQueueConsumer(topic, config, processor, callback);
++				kafkaQueueConsumer.setExecutorService(executorService);
++				// run consumer synchronously
++				kafkaQueueConsumer.run();
++				logger.log(Level.INFO, "Kafka consumer for topic ''{0}'' is finished", topic);
++
++				// if we are here, this means that the consumer was cancelled
++				// either directly or (more likely) through the Consumption callback 
++				kafkaQueueConsumer = null;
++			} else {
++				try {
++					logger.log(Level.FINER, "Runtime ''{0}'' is not available, waiting for ''{1}''ms", new Object[]{runtime.getName(), waitTime });
++					Thread.sleep(waitTime);
++				} catch (InterruptedException e) {
++					throw new RuntimeException(e);
++				}
++			}
++		}
++		logger.log(Level.INFO, "Kafka runtime consumer for topic ''{0}'' has shut down", topic);
++	}
++
++	@Override
++	public void setExecutorService(ExecutorService executorService) {
++		this.executorService = executorService;
++	}
++
++	@Override
++	public void cancel() {
++		isShutdown = true;
++		if (kafkaQueueConsumer != null) {
++			kafkaQueueConsumer.cancel();
++		}
++	}
++
++	@Override
++	public boolean isReady() {
++		return true;
++	}
++
++}
+diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java
+new file mode 100755
+index 0000000..9c08f3a
+--- /dev/null
++++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java
+@@ -0,0 +1,224 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.messaging.kafka;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.HashMap;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.Map.Entry;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.ExecutorService;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.exception.ZkTimeoutException;
++import org.apache.kafka.clients.consumer.ConsumerConfig;
++import org.apache.kafka.clients.consumer.ConsumerRecord;
++import org.apache.kafka.clients.consumer.ConsumerRecords;
++import org.apache.kafka.clients.consumer.KafkaConsumer;
++
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
++import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
++
++public class MessageSearchConsumer implements ODFRunnable {
++	private static final long POLLING_DURATION_MS = 100;
++	private static final int MAX_POLL_COUNT = 5;
++
++	private Logger logger = Logger.getLogger(MessageSearchConsumer.class.getName());
++	private SearchCompletedCallback searchCompletedCallback;
++	private List<String> searchStrings;
++	protected String topic;
++	private KafkaConsumer<String, String> kafkaConsumer;
++	private boolean shutdown;
++	private boolean ready = false;
++	private List<PartitionOffsetInfo> maxOffsetsForTopic = new ArrayList<PartitionOffsetInfo>();
++
++
++	public MessageSearchConsumer(String topic, SearchCompletedCallback completionCallback, List<String> searchStrings) {
++		setTopic(topic);
++		setSearchStrings(searchStrings);
++		setCompletitionCallback(completionCallback);
++	}
++
++	public MessageSearchConsumer() {
++	}
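++
++	// Illustrative usage: search a topic for a request id and receive the matching offsets, e.g.
++	//   new MessageSearchConsumer("odf-status-topic", callback, Arrays.asList(requestId)).run();
++	// where callback implements SearchCompletedCallback and is handed a map from search string
++	// to the partition/offset where the string was found.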
++
++	protected List<PartitionOffsetInfo> retrieveTopicOffsets() {
++		List<PartitionOffsetInfo> offsetsForTopic = new ArrayList<PartitionOffsetInfo>();
++		String zookeeperConnect = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++
++		if (zookeeperConnect != null) {
++			final KafkaMonitor create = new ODFInternalFactory().create(KafkaMonitor.class);
++			for (int part : create.getPartitionsForTopic(zookeeperConnect, this.topic)) {
++				offsetsForTopic.add(create.getOffsetsOfLastMessagesForTopic(zookeeperConnect, this.topic, part));
++			}
++		}
++		return offsetsForTopic;
++	}
++
++	public void setTopic(String topic) {
++		this.topic = topic;
++	}
++
++	public void setSearchStrings(List<String> searchStrings) {
++		this.searchStrings = searchStrings;
++	}
++
++	public void setCompletitionCallback(SearchCompletedCallback completionCallback) {
++		this.searchCompletedCallback = completionCallback;
++	}
++
++	protected Properties getKafkaConsumerProperties() {
++		Properties consumerProperties = new ODFFactory().create().getSettingsManager().getKafkaConsumerProperties();
++		consumerProperties.put("group.id", UUID.randomUUID().toString() + "_searchConsumer");
++		final String zookeeperConnect = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++		consumerProperties.put("zookeeper.connect", zookeeperConnect);
++		consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
++		consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnect).iterator();
++		StringBuilder brokersString = new StringBuilder();
++		while (brokers.hasNext()) {
++			brokersString.append(brokers.next());
++			if (brokers.hasNext()) {
++				brokersString.append(",");
++			}
++		}
++		consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++		consumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
++		return consumerProperties;
++	}
++
++	@Override
++	public void run() {
++		this.maxOffsetsForTopic = retrieveTopicOffsets();
++		final String logPrefix = "Consumer for topic " + topic + ": ";
++		try {
++
++			Map<Integer, Boolean> maxOffsetReachedMap = new HashMap<Integer, Boolean>();
++			if (maxOffsetsForTopic.isEmpty()) {
++				logger.info("No offsets found for topic " + this.topic + ", therefore no matching messages can be found");
++				if (searchCompletedCallback != null) {
++					searchCompletedCallback.onDoneSearching(new HashMap<String, PartitionOffsetInfo>());
++				}
++				// without offsets there is nothing to search, regardless of whether a callback is set
++				return;
++			}
++			for (PartitionOffsetInfo info : maxOffsetsForTopic) {
++				//if the max offset is -1, no message exists on the partition
++				if (info.getOffset() > -1) {
++					maxOffsetReachedMap.put(info.getPartitionId(), false);
++				}
++			}
++
++			Map<String, PartitionOffsetInfo> resultMap = new HashMap<String, PartitionOffsetInfo>();
++
++			Properties consumerProperties = getKafkaConsumerProperties();
++
++			if (this.kafkaConsumer == null) {
++				logger.fine(logPrefix + " create new consumer for topic " + topic);
++				try {
++					this.kafkaConsumer = new KafkaConsumer<String, String>(consumerProperties);
++					// Note: subscribe() uses automatic partition assignment; since this consumer uses a
++					// unique, random group id (see getKafkaConsumerProperties), no other consumer joins its group.
++					kafkaConsumer.subscribe(Arrays.asList(topic));
++				} catch (ZkTimeoutException zkte) {
++					String zkHosts = consumerProperties.getProperty("zookeeper.connect");
++					logger.log(Level.SEVERE, logPrefix + " Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
++					throw zkte;
++				}
++			}
++			logger.log(Level.INFO, logPrefix + " Consumer ''{1}'' is now listening on ODF queue ''{0}'' with configuration {2}",
++					new Object[] { topic, kafkaConsumer, consumerProperties });
++
++			int pollCount = 0;
++			while (!Thread.interrupted() && pollCount < MAX_POLL_COUNT && !shutdown && kafkaConsumer != null) {
++				logger.info("searching ...");
++				pollCount++;
++				ConsumerRecords<String, String> records = kafkaConsumer.poll(POLLING_DURATION_MS);
++				ready = true;
++				final Iterator<ConsumerRecord<String, String>> polledRecords = records.records(topic).iterator();
++				
++				while (polledRecords.hasNext() && !shutdown) {
++					final ConsumerRecord<String, String> next = polledRecords.next();
++					for (String s : searchStrings) {
++						if ((next.key() != null && next.key().equals(s)) || (next.value() != null && next.value().contains(s))) {
++							final PartitionOffsetInfo position = new PartitionOffsetInfo();
++							position.setOffset(next.offset());
++							position.setPartitionId(next.partition());
++							resultMap.put(s, position);
++						}
++					}
++
++					if (next.offset() == maxOffsetsForTopic.get(next.partition()).getOffset()) {
++						maxOffsetReachedMap.put(next.partition(), true);
++					}
++
++					boolean allCompleted = true;
++					for (Entry<Integer, Boolean> entry : maxOffsetReachedMap.entrySet()) {
++						if (!entry.getValue()) {
++							allCompleted = false;
++							break;
++						}
++					}
++
++					if (allCompleted) {
++						logger.info("Done searching all messages");
++						if (searchCompletedCallback != null) {
++							searchCompletedCallback.onDoneSearching(resultMap);
++							return;
++						}
++						shutdown = true;
++					}
++				}
++			}
++		} catch (Exception exc) {
++			String msg = MessageFormat.format(" Caught exception on queue ''{0}''", topic);
++			logger.log(Level.WARNING, logPrefix + msg, exc);
++		} finally {
++			if (kafkaConsumer != null) {
++				logger.log(Level.FINE, logPrefix + "Closing consumer on topic ''{0}''", topic);
++				kafkaConsumer.close();
++				logger.log(Level.FINE, logPrefix + "Closed consumer on topic ''{0}''", topic);
++				kafkaConsumer = null;
++			}
++		}
++		logger.log(Level.FINE, logPrefix + "Finished consumer on topic ''{0}''", topic);
++	}
++
++	@Override
++	public void setExecutorService(ExecutorService service) {
++
++	}
++
++	@Override
++	public void cancel() {
++		this.shutdown = true;
++	}
++
++	@Override
++	public boolean isReady() {
++		return ready;
++	}
++
++	public interface SearchCompletedCallback {
++		void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap);
++	}
++}
+diff --git a/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..95c1f71
+--- /dev/null
++++ b/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,14 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++DiscoveryServiceQueueManager=org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java
+new file mode 100755
+index 0000000..396193f
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java
+@@ -0,0 +1,137 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.ExecutionException;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.I0Itec.zkclient.exception.ZkTimeoutException;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
++import org.apache.kafka.clients.producer.KafkaProducer;
++import org.apache.kafka.clients.producer.ProducerConfig;
++import org.apache.kafka.clients.producer.ProducerRecord;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.engine.ThreadStatus.ThreadState;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++
++import kafka.admin.AdminUtils;
++import kafka.common.TopicExistsException;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++
++public class KafkaQueueConsumerExceptionTest extends ODFTestcase {
++	static Logger logger = ODFTestLogger.get();
++	static final String topicName = "my_dummy_test_topic";
++	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++
++	@BeforeClass
++	public static void setupTopic() {
++		ZkClient zkClient = null;
++		try {
++			zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
++			logger.log(Level.FINEST, "Checking if topic ''{0}'' already exists", topicName);
++			// using partition size 1 and replication size 1, no special
++			// per-topic config needed
++			logger.log(Level.FINE, "Topic ''{0}'' does not exist, creating it", topicName);
++			//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
++			AdminUtils.createTopic(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false), topicName, 1, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
++			logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
++		} catch (TopicExistsException ex) {
++			logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
++		} catch (ZkTimeoutException zkte) {
++			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zookeeperHost);
++		} finally {
++			if (zkClient != null) {
++				zkClient.close();
++			}
++		}
++	}
++
++	@Test
++	public void testExceptionAndRetryDuringProcessing() throws InterruptedException, ExecutionException, TimeoutException {
++		final ODFInternalFactory odfFactory = new ODFInternalFactory();
++		final String groupId = "retrying-exception-dummy-consumer";
++		Properties kafkaConsumerProperties = new KafkaQueueManager().getConsumerConfigProperties(groupId, true);
++		kafkaConsumerProperties.put("group.id", groupId);
++		final List<String> consumedMsgs1 = new ArrayList<String>();
++		KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, new QueueMessageProcessor() {
++
++			@Override
++			public void process(ExecutorService executorService, String msg, int partition, long offset) {
++				consumedMsgs1.add(msg);
++				logger.info("retry_consumer process " + msg + " throw exception and try again");
++				throw new RuntimeException("Oops!");
++			}
++		});
++
++		final ThreadManager threadManager = odfFactory.create(ThreadManager.class);
++		final String consumerThread = "TEST_CONSUMER_RETRY_RUNNING";
++		threadManager.waitForThreadsToBeReady(10000, Arrays.asList(threadManager.startUnmanagedThread(consumerThread, cnsmr)));
++
++		sendMsg("TEST_MSG");
++		sendMsg("TEST_MSG2");
++
++		Thread.sleep(2000);
++
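++		// each of the two messages is retried MAX_PROCESSING_EXCEPTIONS (= 3) times,
++		// so the processor must have been invoked 2 * 3 = 6 times in total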
++		Assert.assertEquals(2 * KafkaQueueConsumer.MAX_PROCESSING_EXCEPTIONS, consumedMsgs1.size());
++
++		final ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(consumerThread);
++		Assert.assertEquals(ThreadState.RUNNING, stateOfUnmanagedThread);
++	}
++
++	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
++		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++
++		Properties props = odfConfig.getKafkaProducerProperties();
++		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
++		StringBuilder brokersString = new StringBuilder();
++		while (brokers.hasNext()) {
++			brokersString.append(brokers.next());
++			if (brokers.hasNext()) {
++				brokersString.append(",");
++			}
++		}
++		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++
++		final KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
++		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
++		producer.send(producerRecord).get(3000, TimeUnit.MILLISECONDS);
++		producer.close();
++	}
++
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java
+new file mode 100755
+index 0000000..cff538c
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java
+@@ -0,0 +1,303 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.ExecutionException;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.settings.MessagingConfiguration;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
++import org.apache.kafka.clients.consumer.ConsumerConfig;
++import org.apache.kafka.clients.producer.Callback;
++import org.apache.kafka.clients.producer.RecordMetadata;
++import org.apache.wink.json4j.JSONException;
++import org.junit.AfterClass;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Ignore;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
++import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.engine.ThreadStatus.ThreadState;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
++import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
++import org.apache.atlas.odf.core.controlcenter.DefaultThreadManager;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
++import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaProducerManager;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class KafkaQueueManagerTest extends ODFTestBase {
++
++	private static Long origRetention;
++	Logger logger = ODFTestLogger.get();
++	String zookeeperConnectString = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++
++	@BeforeClass
++	public static void setupTrackerRetention() throws ValidationException {
++		SettingsManager settingsManager = new ODFFactory().create().getSettingsManager();
++		//SETUP RETENTION TO KEEP TRACKERS!!!
++		final MessagingConfiguration messagingConfiguration = settingsManager.getODFSettings().getMessagingConfiguration();
++		origRetention = messagingConfiguration.getAnalysisRequestRetentionMs();
++		messagingConfiguration.setAnalysisRequestRetentionMs(120000000l);
++
++		ODFTestLogger.get().info("Set request retention to " + settingsManager.getODFSettings().getMessagingConfiguration().getAnalysisRequestRetentionMs());
++	}
++
++	@AfterClass
++	public static void cleanupTrackerRetention() throws ValidationException {
++		SettingsManager settingsManager = new ODFFactory().create().getSettingsManager();
++		ODFSettings settings = settingsManager.getODFSettings();
++		settings.getMessagingConfiguration().setAnalysisRequestRetentionMs(origRetention);
++		settingsManager.updateODFSettings(settings);
++	}
++
++	@Test
++	public void testStatusQueue() throws Exception {
++		KafkaQueueManager kqm = new KafkaQueueManager();
++
++		logger.info("Queue manager created");
++		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
++
++		long before = System.currentTimeMillis();
++		tracker.setLastModified(before);
++		int maxEntries = 10;
++		for (int i = 0; i < maxEntries; i++) {
++			tracker.getRequest().setId("id" + i);
++			StatusQueueEntry sqe = new StatusQueueEntry();
++			sqe.setAnalysisRequestTracker(tracker);
++			kqm.enqueueInStatusQueue(sqe);
++		}
++		long after = System.currentTimeMillis();
++		logger.info("Time for enqueueing " + maxEntries + " objects: " + (after - before) + ", " + ((after - before) / maxEntries) + "ms per object");
++		Thread.sleep(100 * maxEntries);
++
++		AnalysisRequestTrackerStore store = new DefaultStatusQueueStore();
++
++		for (int i = 0; i < maxEntries; i++) {
++			logger.info("Querying status " + i);
++			AnalysisRequestTracker queriedTracker = store.query("id" + i);
++			Assert.assertNotNull(queriedTracker);
++			Assert.assertEquals(STATUS.FINISHED, queriedTracker.getStatus());
++		}
++
++		logger.info("Test testStatusQueue finished");
++	}
++
++	/**
++	 * This test creates a tracker, puts it on the status queue, kills the service consumer and creates a new dummy consumer to move the offset of the service consumer behind the new tracker.
++	 * The status consumer is then shut down and its offset is reset so that it consumes from the start again, thereby cleaning up stuck requests.
++	 * Finally, the Kafka queue manager is re-initialized, causing all consumers to come up and triggering the cleanup process.
++	 */
++	@Test
++	@Ignore("Adjust once ServiceRuntimes are fully implemented")
++	public void testStuckRequestCleanup() throws JSONException, InterruptedException, ExecutionException, TimeoutException {
++		final AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json",
++				null);
++		tracker.setStatus(STATUS.IN_DISCOVERY_SERVICE_QUEUE);
++		tracker.setNextDiscoveryServiceRequest(0);
++		tracker.setLastModified(System.currentTimeMillis());
++		final String newTrackerId = "KAFKA_QUEUE_MANAGER_09_TEST" + UUID.randomUUID().toString();
++		tracker.getRequest().setId(newTrackerId);
++		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
++		final DiscoveryServiceProperties discoveryServiceRegistrationInfo = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties()
++				.get(0);
++		dsRequest.setDiscoveryServiceId(discoveryServiceRegistrationInfo.getId());
++		String dsID = dsRequest.getDiscoveryServiceId();
++		String topicName = KafkaQueueManager.SERVICE_TOPIC_PREFIX + dsID;
++		//Add tracker to queue, set offset behind request so that it should be cleanup
++
++		String consumerGroupId = "odf-topic-" + dsID + "_group";
++		String threadName = "Dummy_DiscoveryServiceQueueConsumer" + topicName;
++
++		final List<Throwable> multiThreadErrors = new ArrayList<Throwable>();
++		final DefaultThreadManager tm = new DefaultThreadManager();
++		logger.info("shutdown old test 09 consumer and replace with fake doing nothing");
++		for (int no = 0; no < discoveryServiceRegistrationInfo.getParallelismCount(); no++) {
++			tm.shutdownThreads(Collections.singletonList("DiscoveryServiceQueueConsumer" + topicName + "_" + no));
++		}
++		Properties kafkaConsumerProps = getKafkaConsumerConfigProperties(consumerGroupId);
++
++		final long[] producedMsgOffset = new long[1];
++
++		final CountDownLatch msgProcessingLatch = new CountDownLatch(1);
++		ThreadStartupResult created = tm.startUnmanagedThread(threadName, new KafkaQueueConsumer(topicName, kafkaConsumerProps, new QueueMessageProcessor() {
++
++			@Override
++			public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
++				logger.info("Dequeue without processing " + msgOffset);
++				if (msgOffset == producedMsgOffset[0]) {
++					try {
++						msgProcessingLatch.countDown();
++					} catch (Exception e) {
++						msgProcessingLatch.countDown();
++						multiThreadErrors.add(e);
++					}
++				}
++			}
++
++		}));
++
++		tm.waitForThreadsToBeReady(30000, Arrays.asList(created));
++
++		String key = tracker.getRequest().getId();
++		String value = JSONUtils.toJSON(tracker);
++
++		new DefaultStatusQueueStore().store(tracker);
++
++		KafkaMonitor kafkaMonitor = new ODFInternalFactory().create(KafkaMonitor.class);
++		List<String> origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
++		logger.info("Found status consumers: " + origQueueConsumers.toString() + ", shutting down StatusWatcher");
++
++		//kill status queue watcher so that it is restarted when queue manager is initialized and detects stuck requests
++		tm.shutdownThreads(Collections.singletonList("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0"));
++
++		int maxWaitForConsumerDeath = 60;
++		while (tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0") != ThreadState.NON_EXISTENT
++				&& tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0") != ThreadState.FINISHED && maxWaitForConsumerDeath > 0) {
++			maxWaitForConsumerDeath--;
++			Thread.sleep(500);
++		}
++
++		logger.info("StatusWatcher consumer gone? retries left: " + maxWaitForConsumerDeath + ", state: " + tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0"));
++		logger.info("Setting offset for status consumer so that it consumes from the start again when restarting");
++		final int offset = 1000000;
++		for (String statusConsumerGroup : origQueueConsumers) {
++			if (statusConsumerGroup.contains("DSStatusWatcherConsumerGroup")) {
++				boolean success = false;
++				int retryCount = 0;
++				final int maxOffsetRetry = 20;
++				while (!success && retryCount < maxOffsetRetry) {
++					success = kafkaMonitor.setOffset(zookeeperConnectString, statusConsumerGroup, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE, 0, offset);
++					retryCount++;
++					Thread.sleep(500);
++				}
++
++				Assert.assertNotEquals(retryCount, maxOffsetRetry);
++				Assert.assertTrue(success);
++			}
++		}
++
++		new ODFInternalFactory().create(KafkaProducerManager.class).sendMsg(topicName, key, value, new Callback() {
++
++			@Override
++			public void onCompletion(RecordMetadata metadata, Exception exception) {
++				producedMsgOffset[0] = metadata.offset();
++			}
++		});
++
++		final boolean await = msgProcessingLatch.await(240, TimeUnit.SECONDS);
++		Assert.assertTrue(await);
++		if (await) {
++			logger.info("run after message consumption...");
++			AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
++			AnalysisRequestTracker storeTracker = store.query(tracker.getRequest().getId());
++			Assert.assertEquals(tracker.getRequest().getId(), storeTracker.getRequest().getId());
++			Assert.assertEquals(STATUS.IN_DISCOVERY_SERVICE_QUEUE, storeTracker.getStatus());
++
++			//start odf and cleanup here...
++			logger.info("shutdown all threads and restart ODF");
++			tm.shutdownAllUnmanagedThreads();
++
++			int threadKillRetry = 0;
++			while (tm.getNumberOfRunningThreads() > 0 && threadKillRetry < 20) {
++				Thread.sleep(500);
++				threadKillRetry++;
++			}
++
++			logger.info("All threads down after " + threadKillRetry + " retries, restarting ODF");
++
++			// Initialize analysis manager
++			new ODFFactory().create().getAnalysisManager();
++
++			kafkaMonitor = new ODFInternalFactory().create(KafkaMonitor.class);
++			origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
++			int healthRetrieveRetry = 0;
++			// Wait up to 120 seconds (240 * 500 ms) for the status consumer to come up. Once it is up, ODF has been restarted successfully and we can continue.
++			while (origQueueConsumers.isEmpty() && healthRetrieveRetry < 240) {
++				healthRetrieveRetry++;
++				Thread.sleep(500);
++				origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
++			}
++			Assert.assertNotEquals(healthRetrieveRetry, 240);
++
++			logger.info("initialized, wait for cleanup ... " + healthRetrieveRetry);
++			Thread.sleep(5000);
++			logger.info("Found status queue consumers: " + origQueueConsumers.toString());
++			logger.info("Verifying that the stuck request was cleaned up ...");
++			AnalysisRequestTracker storedTracker = store.query(tracker.getRequest().getId());
++			Assert.assertEquals(STATUS.ERROR, storedTracker.getStatus());
++			logger.info("DONE CLEANING UP, ALL FINE");
++		}
++
++		Assert.assertEquals(0, multiThreadErrors.size());
++	}
++
++	public Properties getKafkaConsumerConfigProperties(String consumerGroupID) {
++		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++		Properties kafkaConsumerProps = odfConfig.getKafkaConsumerProperties();
++		kafkaConsumerProps.put("group.id", consumerGroupID);
++		if (zookeeperConnectString != null) {
++			kafkaConsumerProps.put("zookeeper.connect", zookeeperConnectString);
++		}
++
++		kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
++
++		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
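++		// Build the bootstrap.servers list from the brokers currently registered in Zookeeper.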
++		StringBuilder bld = new StringBuilder();
++		final Iterator<String> iterator = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnectString).iterator();
++		while (iterator.hasNext()) {
++			bld.append(iterator.next());
++			if (iterator.hasNext()) {
++				bld.append(",");
++			}
++		}
++		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bld.toString());
++		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
++
++		return kafkaConsumerProps;
++	}
++
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java
+new file mode 100755
+index 0000000..35b09e2
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java
+@@ -0,0 +1,193 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.Map;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.ExecutionException;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.apache.atlas.odf.core.messaging.kafka.MessageSearchConsumer;
++import org.apache.kafka.clients.producer.KafkaProducer;
++import org.apache.kafka.clients.producer.ProducerConfig;
++import org.apache.kafka.clients.producer.ProducerRecord;
++import org.junit.After;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.test.ODFTestBase;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++
++import kafka.admin.AdminUtils;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++
++public class MessageSearchConsumerTest extends ODFTestBase {
++	private static final String TEST_SEARCH_STRING = "TEST_STRING_" + UUID.randomUUID().toString();
++	private static final String TEST_SEARCH_FAILURE_STRING = "TEST_FAILURE_STRING";
++	static Logger logger = ODFTestLogger.get();
++	final static String topicName = "MessageSearchConsumerTest" + UUID.randomUUID().toString();
++	private static final int PERFORMANCE_MSG_COUNT = 1000;
++	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++	private KafkaProducer<String, String> producer;
++
++	@BeforeClass
++	public static void createTopic() {
++		ZkClient zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
++		ZkUtils utils = new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false);
++		if (!AdminUtils.topicExists(utils, topicName)) {
++			AdminUtils.createTopic(utils, topicName, 2, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
++		}
++	}
++
++	@Test
++	public void testMsgSearchPerformance() throws InterruptedException, ExecutionException, TimeoutException {
++		logger.info("Producing msgs");
++		for (int no = 0; no < PERFORMANCE_MSG_COUNT; no++) {
++			sendMsg("DUMMY_MSG" + no);
++		}
++		sendMsg(TEST_SEARCH_STRING);
++		logger.info("Done producing ...");
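++		// brief pause to let the broker make the produced messages visible to consumers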
++		Thread.sleep(200);
++
++		final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
++		final CountDownLatch searchLatch = new CountDownLatch(1);
++		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
++
++			@Override
++			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
++				logger.info("Done searching " + msgPositionMap.get(TEST_SEARCH_STRING).getOffset());
++				Assert.assertTrue(msgPositionMap.get(TEST_SEARCH_STRING).getOffset() > -1);
++				searchLatch.countDown();
++			}
++		}, Arrays.asList(TEST_SEARCH_STRING)));
++
++		boolean await = searchLatch.await(5, TimeUnit.SECONDS);
++		if (await) {
++			logger.info("Messages searched in time");
++		} else {
++			logger.warning("Couldn't finish search in time");
++		}
++
++		final CountDownLatch failureSearchLatch = new CountDownLatch(1);
++		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
++
++			@Override
++			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
++				logger.info("Done searching " + msgPositionMap.toString());
++				Assert.assertFalse(msgPositionMap.containsKey(TEST_SEARCH_FAILURE_STRING));
++				failureSearchLatch.countDown();
++			}
++		}, Arrays.asList(TEST_SEARCH_FAILURE_STRING)));
++
++		await = failureSearchLatch.await(5, TimeUnit.SECONDS);
++		if (await) {
++			logger.info("Messages searched in time");
++		} else {
++			logger.warning("Couldn't finish search in time");
++		}
++	}
++
++	@Test
++	public void testMsgSearchSuccessAndFailure() throws InterruptedException, ExecutionException, TimeoutException {
++		sendMsg(TEST_SEARCH_STRING);
++
++		Thread.sleep(200);
++
++		final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
++		final CountDownLatch searchLatch = new CountDownLatch(1);
++		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
++
++			@Override
++			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
++				logger.info("Done searching " + msgPositionMap.get(TEST_SEARCH_STRING).getOffset());
++				Assert.assertTrue(msgPositionMap.get(TEST_SEARCH_STRING).getOffset() > -1);
++				searchLatch.countDown();
++			}
++		}, Arrays.asList(TEST_SEARCH_STRING)));
++
++		boolean await = searchLatch.await(5, TimeUnit.SECONDS);
++		if (await) {
++			logger.info("Messages searched in time");
++		} else {
++			logger.warning("Couldn't finish search in time");
++		}
++
++		final CountDownLatch failureSearchLatch = new CountDownLatch(1);
++		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
++
++			@Override
++			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
++				logger.info("Done searching " + msgPositionMap);
++				Assert.assertFalse(msgPositionMap.containsKey(TEST_SEARCH_FAILURE_STRING));
++				failureSearchLatch.countDown();
++			}
++		}, Arrays.asList(TEST_SEARCH_FAILURE_STRING)));
++
++		await = failureSearchLatch.await(5, TimeUnit.SECONDS);
++		if (await) {
++			logger.info("Messages searched in time");
++		} else {
++			logger.warning("Couldn't finish search in time");
++		}
++	}
++
++	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
++		final KafkaProducer<String, String> producer = getProducer();
++		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
++		producer.send(producerRecord).get(15000, TimeUnit.MILLISECONDS);
++	}
++
++	private KafkaProducer<String, String> getProducer() {
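++		// Lazily create a single producer for this test, using the brokers registered
++		// in Zookeeper as bootstrap servers.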
++		if (this.producer == null) {
++			SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++			Properties props = odfConfig.getKafkaProducerProperties();
++			final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
++			StringBuilder brokersString = new StringBuilder();
++			while (brokers.hasNext()) {
++				brokersString.append(brokers.next());
++				if (brokers.hasNext()) {
++					brokersString.append(",");
++				}
++			}
++			props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++			producer = new KafkaProducer<String, String>(props);
++		}
++		return producer;
++	}
++
++	@After
++	public void closeProducer() {
++		// check the field directly: getProducer() never returns null and would even
++		// create a new producer just to close it
++		if (producer != null) {
++			producer.close();
++		}
++	}
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java
+new file mode 100755
+index 0000000..f97dd4e
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java
+@@ -0,0 +1,314 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Properties;
++import java.util.UUID;
++import java.util.concurrent.ExecutionException;
++import java.util.concurrent.ExecutorService;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.TimeoutException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.I0Itec.zkclient.exception.ZkTimeoutException;
++import org.apache.atlas.odf.api.engine.ThreadStatus;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInitializer;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
++import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
++import org.apache.kafka.clients.consumer.ConsumerConfig;
++import org.apache.kafka.clients.producer.KafkaProducer;
++import org.apache.kafka.clients.producer.ProducerConfig;
++import org.apache.kafka.clients.producer.ProducerRecord;
++import org.junit.After;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager;
++import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++
++import kafka.admin.AdminUtils;
++import kafka.common.TopicExistsException;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++
++public class MultiPartitionConsumerTest extends ODFTestcase {
++	static Logger logger = ODFTestLogger.get();
++	final static String topicName = "my_dummy_test_topic" + UUID.randomUUID().toString();
++	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++	static final int PARTITION_COUNT = 3;
++	private static final int MSG_PER_PARTITION = 5;
++	private final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
++
++	@BeforeClass
++	public static void setupTopic() {
++		ZkClient zkClient = null;
++		try {
++			zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
++			logger.log(Level.FINEST, "Checking if topic ''{0}'' already exists", topicName);
++			// using PARTITION_COUNT partitions and replication factor 1, no special
++			// per-topic config needed
++			logger.log(Level.FINE, "Creating topic ''{0}'' if it does not exist yet", topicName);
++			//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
++			AdminUtils.createTopic(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false), topicName, PARTITION_COUNT, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
++			logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
++		} catch (TopicExistsException ex) {
++			logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
++		} catch (ZkTimeoutException zkte) {
++			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zookeeperHost);
++		} finally {
++			if (zkClient != null) {
++				zkClient.close();
++			}
++		}
++	}
++
++	@After
++	public void cleanupConsumers() {
++		logger.info("Cleaning up consumers...");
++		logger.info("----------------------------------  Stopping ODF...");
++		ODFInitializer.stop();
++		logger.info("----------------------------------  Starting ODF...");
++		ODFInitializer.start();
++		logger.info("----------------------------------  ODF started.");
++	}
++
++	@Test
++	public void testMultiPartitionDelayedConsumption() throws InterruptedException, ExecutionException {
++		Properties kafkaConsumerProperties = getConsumerProps();
++		final List<String> consumedMsgs = new ArrayList<String>();
++		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
++
++		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
++		final int processingDelay = 2000;
++		for (int no = 0; no < PARTITION_COUNT; no++) {
++			final int currentThread = no;
++			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
++
++				@Override
++				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
++					try {
++						Thread.sleep(processingDelay);
++					} catch (InterruptedException e) {
++						// not expected in this test; log and keep going
++						e.printStackTrace();
++					}
++					consumedMsgs.add(msg);
++					logger.info("process " + msg + " in thread " + currentThread);
++				}
++			};
++
++			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
++
++			final String consumerThread = threadPrefix + no;
++			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
++			startupList.add(startUnmanagedThread);
++		}
++		try {
++			threadManager.waitForThreadsToBeReady(30000, startupList);
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
++					sendMsg("Partition " + no + " msg " + msgNo);
++				}
++			}
++
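++			// Wait until all messages have been consumed, bounded by the total artificial
++			// processing delay plus a 10 second margin.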
++			int totalWait = 0;
++			while (totalWait < PARTITION_COUNT * MSG_PER_PARTITION * processingDelay + 10000 && consumedMsgs.size() < PARTITION_COUNT * MSG_PER_PARTITION) {
++				Thread.sleep(2000);
++				totalWait += 2000;
++			}
++
++			logger.info("Done with all messages after " + totalWait + " ms");
++
++			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
++
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
++				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
++			}
++		} catch (TimeoutException e) {
++			Assert.fail("Consumer could not be started on time");
++		}
++	}
++
++	@Test
++	public void testMultiPartitionConsumption() throws InterruptedException, ExecutionException {
++		Properties kafkaConsumerProperties = getConsumerProps();
++		final List<String> consumedMsgs = new ArrayList<String>();
++		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
++
++		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
++		for (int no = 0; no < PARTITION_COUNT; no++) {
++			final int currentThread = no;
++			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
++
++				@Override
++				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
++					consumedMsgs.add(msg);
++					logger.info("process " + msg + " in thread " + currentThread);
++				}
++			};
++
++			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
++
++			final String consumerThread = threadPrefix + no;
++			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
++			startupList.add(startUnmanagedThread);
++		}
++		try {
++			threadManager.waitForThreadsToBeReady(30000, startupList);
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
++					sendMsg("Partition " + no + " msg " + msgNo);
++				}
++			}
++
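++			// Poll for up to 15 seconds (30 * 500 ms) until all messages have been consumed.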
++			int totalWait = 0;
++			boolean done = false;
++			while (totalWait < 30 && !done) {
++				if (consumedMsgs.size() == PARTITION_COUNT * MSG_PER_PARTITION) {
++					done = true;
++				}
++				totalWait++;
++				Thread.sleep(500);
++			}
++
++			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
++
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
++				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
++			}
++		} catch (TimeoutException e) {
++			Assert.fail("Consumer could not be started on time");
++		}
++	}
++
++	@Test
++	public void testMultiPartitionExceptionAndRetryDuringProcessing() throws InterruptedException, ExecutionException {
++		Properties kafkaConsumerProperties = getConsumerProps();
++		final List<String> consumedMsgs = new ArrayList<String>();
++		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
++
++		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
++		for (int no = 0; no < PARTITION_COUNT; no++) {
++			final int currentThread = no;
++			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
++
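++				// This processor throws (MAX_PROCESSING_EXCEPTIONS - 1) forced errors before it
++				// starts consuming messages normally, exercising the consumer's retry logic.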
++				private int excCount = 0;
++
++				@Override
++				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
++					if (excCount < KafkaQueueConsumer.MAX_PROCESSING_EXCEPTIONS - 1) {
++						excCount++;
++						logger.info("Throw exception " + excCount + " on consumer " + currentThread);
++						throw new RuntimeException("Forced error on consumer");
++					}
++					consumedMsgs.add(msg);
++					logger.info("process " + msg + " in thread " + currentThread);
++				}
++			};
++
++			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
++
++			final String consumerThread = threadPrefix + no;
++			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
++			startupList.add(startUnmanagedThread);
++		}
++		try {
++			threadManager.waitForThreadsToBeReady(30000, startupList);
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
++					sendMsg("Partition " + no + " msg " + msgNo);
++				}
++			}
++
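++			// Poll for up to 15 seconds until all messages, including the retried ones, have been processed.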
++			int totalWait = 0;
++			boolean done = false;
++			while (totalWait < 30 && !done) {
++				if (consumedMsgs.size() == PARTITION_COUNT * MSG_PER_PARTITION) {
++					done = true;
++				}
++				totalWait++;
++				Thread.sleep(500);
++			}
++			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
++
++			for (int no = 0; no < PARTITION_COUNT; no++) {
++				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
++				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
++			}
++		} catch (TimeoutException e) {
++			Assert.fail("Consumer could not be started on time");
++		}
++	}
++
++	private Properties getConsumerProps() {
++		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++		Properties kafkaConsumerProperties = odfConfig.getKafkaConsumerProperties();
++		final String groupId = "retrying-dummy-consumer";
++		kafkaConsumerProperties.put("group.id", groupId);
++		kafkaConsumerProperties.put("zookeeper.connect", zookeeperHost);
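++		// Build the bootstrap.servers list from the brokers currently registered in Zookeeper.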
++		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
++		StringBuilder brokersString = new StringBuilder();
++		while (brokers.hasNext()) {
++			brokersString.append(brokers.next());
++			if (brokers.hasNext()) {
++				brokersString.append(",");
++			}
++		}
++		kafkaConsumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++		kafkaConsumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
++		kafkaConsumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++		kafkaConsumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
++
++		return kafkaConsumerProperties;
++	}
++
++	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
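++		// Note: a new producer is created and closed for every message; acceptable for the small test volume.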
++		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
++		Properties props = odfConfig.getKafkaProducerProperties();
++		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
++		StringBuilder brokersString = new StringBuilder();
++		while (brokers.hasNext()) {
++			brokersString.append(brokers.next());
++			if (brokers.hasNext()) {
++				brokersString.append(",");
++			}
++		}
++		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
++		//Should we use a custom partitioner? we could try to involve consumer offsets and always put on "emptiest" partition
++		//props.put("partitioner.class", TestMessagePartitioner.class);
++
++		final KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
++		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
++		producer.send(producerRecord).get(3000, TimeUnit.MILLISECONDS);
++		producer.close();
++	}
++
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java
+new file mode 100755
+index 0000000..d1c9810
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java
+@@ -0,0 +1,99 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
++
++public class ParallelServiceErrorTest extends ODFTestcase {
++	private static final int NUMBER_OF_QUEUED_REQUESTS = 1;
++	Logger log = ODFTestLogger.get();
++
++	@Test
++	public void runDataSetsInParallelError() throws Exception {
++		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "errorID2" }), AnalysisRequestStatus.State.FINISHED, AnalysisRequestStatus.State.ERROR);
++	}
++
++	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, AnalysisRequestStatus.State... expectedState) throws Exception {
++		log.info("Running data sets in parallel: " + dataSetIDs);
++		log.info("Expected states: " + Arrays.toString(expectedState));
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++
++		List<AnalysisRequest> requests = new ArrayList<AnalysisRequest>();
++		List<AnalysisResponse> responses = new ArrayList<AnalysisResponse>();
++		List<String> idList = new ArrayList<String>();
++
++		for (int no = 0; no < NUMBER_OF_QUEUED_REQUESTS; no++) {
++			for (String dataSet : dataSetIDs) {
++				final AnalysisRequest req = ODFAPITest.createAnalysisRequest(Arrays.asList(dataSet + UUID.randomUUID().toString()));
++				AnalysisResponse resp = analysisManager.runAnalysis(req);
++				req.setId(resp.getId());
++				requests.add(req);
++				idList.add(resp.getId());
++				responses.add(resp);
++			}
++		}
++		log.info("Parallel requests started: " + idList.toString());
++
++		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), requests.size());
++		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), responses.size());
++
++		// check that requests are processed in parallel: 
++		//   there must be a point in time where both requests are in status "active"
++		log.info("Polling for status of parallel request...");
++		boolean foundPointInTimeWhereBothRequestsAreActive = false;
++		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
++		List<AnalysisRequestStatus.State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
++		do {
++			int foundActive = 0;
++			allSingleStates.clear();
++			for (AnalysisRequest request : requests) {
++				final AnalysisRequestStatus.State state = analysisManager.getAnalysisRequestStatus(request.getId()).getState();
++				if (state == AnalysisRequestStatus.State.ACTIVE) {
++					log.info("ACTIVE: " + request.getId() + " foundactive: " + foundActive);
++					foundActive++;
++				} else {
++					log.info("NOT ACTIVE " + request.getId() + " _ " + state);
++				}
++				allSingleStates.add(state);
++			}
++			if (foundActive > 1) {
++				foundPointInTimeWhereBothRequestsAreActive = true;
++			}
++
++			maxPolls--;
++			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
++		} while (maxPolls > 0 && Utils.containsNone(allSingleStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.ACTIVE, AnalysisRequestStatus.State.QUEUED }));
++
++		Assert.assertTrue(maxPolls > 0);
++		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
++		Assert.assertTrue(allSingleStates.containsAll(Arrays.asList(expectedState)));
++	}
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java
+new file mode 100755
+index 0000000..7a180d2
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java
+@@ -0,0 +1,100 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.analysis.AnalysisManager;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.core.test.ODFTestLogger;
++import org.apache.atlas.odf.core.test.ODFTestcase;
++import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
++
++public class ParallelServiceTest extends ODFTestcase {
++	private static final int NUMBER_OF_QUEUED_REQUESTS = 1;
++	Logger log = ODFTestLogger.get();
++
++	@Test
++	public void runDataSetsInParallelSuccess() throws Exception {
++		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "successID2" }), State.FINISHED, State.FINISHED);
++	}
++
++	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, State... expectedState) throws Exception {
++		log.info("Running data sets in parallel: " + dataSetIDs);
++		log.info("Expected states: " + Arrays.toString(expectedState));
++		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
++
++		List<AnalysisRequest> requests = new ArrayList<AnalysisRequest>();
++		List<AnalysisResponse> responses = new ArrayList<AnalysisResponse>();
++		List<String> idList = new ArrayList<String>();
++
++		for (int no = 0; no < NUMBER_OF_QUEUED_REQUESTS; no++) {
++			for (String dataSet : dataSetIDs) {
++				final AnalysisRequest req = ODFAPITest.createAnalysisRequest(Arrays.asList(dataSet + UUID.randomUUID().toString()));
++				AnalysisResponse resp = analysisManager.runAnalysis(req);
++				req.setId(resp.getId());
++				requests.add(req);
++				idList.add(resp.getId());
++				responses.add(resp);
++			}
++		}
++		log.info("Parallel requests started: " + idList.toString());
++
++		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), requests.size());
++		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), responses.size());
++
++		// check that requests are processed in parallel: 
++		//   there must be a point in time where both requests are in status "active"
++		log.info("Polling for status of parallel request...");
++		boolean foundPointInTimeWhereBothRequestsAreActive = false;
++		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
++		List<State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
++		do {
++			int foundActive = 0;
++			allSingleStates.clear();
++			for (AnalysisRequest request : requests) {
++				final State state = analysisManager.getAnalysisRequestStatus(request.getId()).getState();
++				if (state == State.ACTIVE) {
++					log.info("ACTIVE: " + request.getId() + " foundactive: " + foundActive);
++					foundActive++;
++				} else {
++					log.info("NOT ACTIVE " + request.getId() + " _ " + state);
++				}
++				allSingleStates.add(state);
++			}
++			if (foundActive > 1) {
++				foundPointInTimeWhereBothRequestsAreActive = true;
++			}
++
++			maxPolls--;
++			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
++		} while (maxPolls > 0 && Utils.containsNone(allSingleStates, new State[] { State.ACTIVE, State.QUEUED }));
++
++		Assert.assertTrue(maxPolls > 0);
++		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
++		Assert.assertTrue(allSingleStates.containsAll(Arrays.asList(expectedState)));
++	}
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java
+new file mode 100755
+index 0000000..5e3d97e
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java
+@@ -0,0 +1,49 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.test.TestEnvironmentInitializer;
++
++public class TestEnvironmentMessagingInitializer implements TestEnvironmentInitializer {
++
++	public TestEnvironmentMessagingInitializer() {
++	}
++	
++	public void start() {
++		Logger logger = Logger.getLogger(TestEnvironmentMessagingInitializer.class.getName());
++		try {
++			logger.info("Starting Test-Kafka during initialization...");
++			TestKafkaStarter starter = new TestKafkaStarter();
++			starter.startKafka();
++			logger.info("Test-Kafka initialized");
++		} catch (Exception exc) {
++			logger.log(Level.SEVERE, "Exception occurred while starting test kafka", exc);
++			throw new RuntimeException(exc);
++		}
++	}
++
++	@Override
++	public void stop() {
++		// nothing to stop: the embedded Kafka instance is left running for subsequent tests
++	}
++
++	@Override
++	public String getName() {
++		return "Kafka1001";
++	}
++}
+diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java
+new file mode 100755
+index 0000000..1c3025e
+--- /dev/null
++++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java
+@@ -0,0 +1,306 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.test.messaging.kafka;
++
++import java.io.File;
++import java.io.FileNotFoundException;
++import java.io.IOException;
++import java.net.BindException;
++import java.net.DatagramSocket;
++import java.net.ServerSocket;
++import java.rmi.NotBoundException;
++import java.util.List;
++import java.util.Properties;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.TimeUnit;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.I0Itec.zkclient.ZkClient;
++import org.I0Itec.zkclient.ZkConnection;
++import org.apache.kafka.common.protocol.SecurityProtocol;
++import org.apache.wink.json4j.JSONObject;
++import org.apache.zookeeper.KeeperException.NoNodeException;
++import org.apache.zookeeper.WatchedEvent;
++import org.apache.zookeeper.Watcher;
++import org.apache.zookeeper.Watcher.Event.KeeperState;
++import org.apache.zookeeper.ZooKeeper;
++import org.apache.zookeeper.ZooKeeper.States;
++import org.apache.zookeeper.server.ServerConfig;
++import org.apache.zookeeper.server.ZooKeeperServerMain;
++import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
++
++import org.apache.atlas.odf.core.Utils;
++
++import kafka.cluster.Broker;
++import kafka.server.KafkaConfig;
++import kafka.server.KafkaServerStartable;
++import kafka.utils.ZKStringSerializer$;
++import kafka.utils.ZkUtils;
++import scala.collection.JavaConversions;
++import scala.collection.Seq;
++
++public class TestKafkaStarter {
++
++	public static boolean deleteRecursive(File path) throws FileNotFoundException {
++		if (!path.exists()) {
++			throw new FileNotFoundException(path.getAbsolutePath());
++		}
++		boolean ret = true;
++		if (path.isDirectory()) {
++			for (File f : path.listFiles()) {
++				ret = ret && deleteRecursive(f);
++			}
++		}
++		return ret && path.delete();
++	}
++
++	static Thread zookeeperThread = null;
++	static boolean kafkaStarted = false;
++	static Object lockObject = new Object();
++	static KafkaServerStartable kafkaServer = null;
++	static ZooKeeperServerMainWithShutdown zooKeeperServer = null;
++
++
++	boolean cleanData = true; // all data is cleaned at server start !!
++
++	public boolean isCleanData() {
++		return cleanData;
++	}
++
++	public void setCleanData(boolean cleanData) {
++		this.cleanData = cleanData;
++	}
++
++	Logger logger = Logger.getLogger(TestKafkaStarter.class.getName());
++
++	void log(String s) {
++		logger.info(s);
++	}
++
++	int zookeeperStartupTime = 10000;
++	int kafkaStartupTime = 10000;
++
++	static class ZooKeeperServerMainWithShutdown extends ZooKeeperServerMain {
++		public void shutdown() {
++			super.shutdown();
++		}
++	}
++
++	private void startZookeeper() throws Exception {
++		log("Starting zookeeper");
++
++		final Properties zkProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties");
++		final String zkPort = (String) zkProps.get("clientPort");
++		if (zooKeeperServer == null) {
++			log("zookeeper properties: " + zkProps);
++			if (cleanData) {
++				String dataDir = zkProps.getProperty("dataDir");
++				log("Removing all data from zookeeper data dir " + dataDir);
++				File dir = new File(dataDir);
++				if (dir.exists()) {
++					if (!deleteRecursive(dir)) {
++						throw new IOException("Could not delete directory " + dataDir);
++					}
++				}
++			}
++			final ZooKeeperServerMainWithShutdown zk = new ZooKeeperServerMainWithShutdown();
++			final ServerConfig serverConfig = new ServerConfig();
++			log("Loading zookeeper config...");
++			QuorumPeerConfig zkConfig = new QuorumPeerConfig();
++			zkConfig.parseProperties(zkProps);
++			serverConfig.readFrom(zkConfig);
++
++			Runnable zookeeperStarter = new Runnable() {
++
++				@Override
++				public void run() {
++					try {
++						log("Now starting Zookeeper with API...");
++						zk.runFromConfig(serverConfig);
++					} catch (BindException ex) {
++						log("Embedded zookeeper could not be started, port is already in use. Trying to use external zookeeper");
++						ZooKeeper zk = null;
++						try {
++							zk = new ZooKeeper("localhost:" + zkPort, 5000, null);
++							if (zk.getState().equals(States.CONNECTED)) {
++								log("Using existing zookeeper running on port " + zkPort);
++								return;
++							} else {
++								throw new NotBoundException();
++							}
++						} catch (Exception zkEx) {
++							throw new RuntimeException("Could not connect to zookeeper on port " + zkPort + ". Please close all applications listening on this port.");
++						} finally {
++							if (zk != null) {
++								try {
++									zk.close();
++								} catch (InterruptedException e) {
++									logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
++								}
++							}
++						}
++					} catch (Exception e) {
++						e.printStackTrace();
++						throw new RuntimeException(e);
++					}
++
++				}
++			};
++
++			zookeeperThread = new Thread(zookeeperStarter);
++			zookeeperThread.setDaemon(true);
++			zookeeperThread.start();
++			log("Zookeeper start initiated");
++			zooKeeperServer = zk;
++		}
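++		// Whether we started Zookeeper ourselves or reuse an external instance, wait for a
++		// SyncConnected event to confirm the server is reachable.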
++		ZkConnection conn = new ZkConnection("localhost:" + zkPort);
++		final CountDownLatch latch = new CountDownLatch(1);
++		conn.connect(new Watcher() {
++
++			@Override
++			public void process(WatchedEvent event) {
++				log("Zookeeper event: " + event.getState());
++				if (event.getState().equals(KeeperState.SyncConnected)) {
++					log("Zookeeper server up and running");
++					latch.countDown();
++				}
++			}
++		});
++
++		boolean zkReady = latch.await(zookeeperStartupTime, TimeUnit.MILLISECONDS);
++		if (zkReady) {
++			log("Zookeeper initialized and started");
++		} else {
++			logger.severe("Zookeeper could not be initialized within " + (zookeeperStartupTime / 1000) + " sec");
++		}
++		conn.close();
++	}
++
++	public boolean isRunning() {
++		return kafkaStarted;
++	}
++
++	public void startKafka() throws Exception {
++		synchronized (lockObject) {
++			if (kafkaStarted) {
++				log("Kafka already running");
++				return;
++			}
++			this.startZookeeper();
++
++			log("Starting Kafka server...");
++			Properties kafkaProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties");
++			log("Kafka properties: " + kafkaProps);
++			KafkaConfig kafkaConfig = new KafkaConfig(kafkaProps);
++			int kafkaPort = kafkaConfig.port();
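++			// Only wipe the Kafka log dir when no broker is currently listening on the port.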
++			if (cleanData && isPortAvailable(kafkaPort)) {
++				String logDir = kafkaProps.getProperty("log.dirs");
++				log("Removing all data from kafka log dir: " + logDir);
++				File dir = new File(logDir);
++				if (dir.exists()) {
++					if (!deleteRecursive(dir)) {
++						throw new IOException("Kafka logDir could not be deleted: " + logDir);
++					}
++				}
++			}
++			if (!isPortAvailable(kafkaPort)) {
++				log("Kafka port " + kafkaPort + " is already in use. "
++						+ "Checking if zookeeper has a registered broker on this port to make sure it is an existing kafka instance using the port.");
++				ZooKeeper zk = new ZooKeeper(kafkaConfig.zkConnect(), 10000, null);
++				try {
++					List<String> ids = zk.getChildren("/brokers/ids", false);
++					if (ids != null && !ids.isEmpty()) {
++						for (String id : ids) {
++							String brokerInfo = new String(zk.getData("/brokers/ids/" + id, false, null), "UTF-8");
++							JSONObject broker = new JSONObject(brokerInfo);
++							int port = Integer.parseInt(String.valueOf(broker.get("port")));
++							if (port == kafkaPort) {
++								log("Using externally started kafka broker on port " + port);
++								kafkaStarted = true;
++								return;
++							}
++						}
++					}
++				} catch (NoNodeException ex) {
++					log("No brokers registered with zookeeper!");
++					throw new RuntimeException("Kafka broker port " + kafkaPort
++							+ " not available and no broker found! Please close all running applications listening on this port");
++				} finally {
++					if (zk != null) {
++						try {
++							zk.close();
++						} catch (InterruptedException e) {
++							logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
++						}
++					}
++				}
++			}
++			KafkaServerStartable kafka = KafkaServerStartable.fromProps(kafkaProps);
++			kafka.startup();
++			log("Kafka server start initiated");
++
++			kafkaServer = kafka;
++			log("Give Kafka a maximum of " + kafkaStartupTime + " ms to start");
++			ZkClient zk = new ZkClient(kafkaConfig.zkConnect(), 10000, 5000, ZKStringSerializer$.MODULE$);
++			int maxRetryCount = kafkaStartupTime / 1000;
++			int cnt = 0;
++			while (cnt < maxRetryCount) {
++				cnt++;
++				Seq<Broker> allBrokersInCluster = new ZkUtils(zk, new ZkConnection(kafkaConfig.zkConnect()), false).getAllBrokersInCluster();
++				List<Broker> brokers = JavaConversions.seqAsJavaList(allBrokersInCluster);
++				for (Broker broker : brokers) {
++					if (broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port() == kafkaPort) {
++						log("Broker is registered, Kafka is available after " + cnt + " seconds");
++						kafkaStarted = true;
++						return;
++					}
++				}
++				Thread.sleep(1000);
++			}
++			logger.severe("Kafka broker was not started after " + kafkaStartupTime + " ms");
++		}
++	}
++
++	public void shutdownKafka() {
++		// do nothing for shutdown
++	}
++
++	boolean isPortAvailable(int port) {
++		ServerSocket ss = null;
++		DatagramSocket ds = null;
++		try {
++			ss = new ServerSocket(port);
++			ss.setReuseAddress(true);
++			ds = new DatagramSocket(port);
++			ds.setReuseAddress(true);
++			return true;
++		} catch (IOException e) {
++			// the port is already in use; fall through and return false
++		} finally {
++			if (ds != null) {
++				ds.close();
++			}
++
++			if (ss != null) {
++				try {
++					ss.close();
++				} catch (IOException e) {
++					// ignore errors while closing the probe socket
++				}
++			}
++		}
++
++		return false;
++	}
++}
+diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties
+new file mode 100755
+index 0000000..4769c95
+--- /dev/null
++++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties
+@@ -0,0 +1,136 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# Licensed to the Apache Software Foundation (ASF) under one or more
++# contributor license agreements.  See the NOTICE file distributed with
++# this work for additional information regarding copyright ownership.
++# The ASF licenses this file to You under the Apache License, Version 2.0
++# (the "License"); you may not use this file except in compliance with
++# the License.  You may obtain a copy of the License at
++# 
++#    http://www.apache.org/licenses/LICENSE-2.0
++# 
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# see kafka.server.KafkaConfig for additional details and defaults
++
++############################# Server Basics #############################
++
++# The id of the broker. This must be set to a unique integer for each broker.
++broker.id=0
++
++############################# Socket Server Settings #############################
++
++listeners=PLAINTEXT://:9092
++
++# The port the socket server listens on
++# port=9092
++
++# Hostname the broker will bind to. If not set, the server will bind to all interfaces
++#host.name=localhost
++
++# Hostname the broker will advertise to producers and consumers. If not set, it uses the
++# value for "host.name" if configured.  Otherwise, it will use the value returned from
++# java.net.InetAddress.getCanonicalHostName().
++#advertised.host.name=<hostname routable by clients>
++
++# The port to publish to ZooKeeper for clients to use. If this is not set,
++# it will publish the same port that the broker binds to.
++#advertised.port=<port accessible by clients>
++
++# The number of threads handling network requests
++num.network.threads=3
++ 
++# The number of threads doing disk I/O
++num.io.threads=8
++
++# The send buffer (SO_SNDBUF) used by the socket server
++socket.send.buffer.bytes=102400
++
++# The receive buffer (SO_RCVBUF) used by the socket server
++socket.receive.buffer.bytes=102400
++
++# The maximum size of a request that the socket server will accept (protection against OOM)
++socket.request.max.bytes=104857600
++
++
++############################# Log Basics #############################
++
++# A comma separated list of directories under which to store log files
++log.dirs=/tmp/odf-embedded-test-kafka/kafka-logs
++
++# The default number of log partitions per topic. More partitions allow greater
++# parallelism for consumption, but this will also result in more files across
++# the brokers.
++num.partitions=1
++
++# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
++# Increasing this value is recommended for installations with data dirs located on a RAID array.
++num.recovery.threads.per.data.dir=1
++
++############################# Log Flush Policy #############################
++
++# Messages are immediately written to the filesystem but by default we only fsync() to sync
++# the OS cache lazily. The following configurations control the flush of data to disk. 
++# There are a few important trade-offs here:
++#    1. Durability: Unflushed data may be lost if you are not using replication.
++#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
++#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
++# The settings below allow one to configure the flush policy to flush data after a period of time or
++# every N messages (or both). This can be done globally and overridden on a per-topic basis.
++
++# The number of messages to accept before forcing a flush of data to disk
++#log.flush.interval.messages=10000
++
++# The maximum amount of time a message can sit in a log before we force a flush
++#log.flush.interval.ms=1000
++
++############################# Log Retention Policy #############################
++
++# The following configurations control the disposal of log segments. The policy can
++# be set to delete segments after a period of time, or after a given size has accumulated.
++# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
++# from the end of the log.
++
++# The minimum age of a log file to be eligible for deletion
++log.retention.hours=24
++
++# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
++# segments don't drop below log.retention.bytes.
++#log.retention.bytes=1073741824
++
++# The maximum size of a log segment file. When this size is reached a new log segment will be created.
++log.segment.bytes=1073741824
++
++# The interval at which log segments are checked to see if they can be deleted according 
++# to the retention policies
++log.retention.check.interval.ms=300000
++
++# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
++# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
++log.cleaner.enable=false
++
++############################# Zookeeper #############################
++
++# Zookeeper connection string (see zookeeper docs for details).
++# This is a comma-separated list of host:port pairs, each corresponding to a
++# ZooKeeper server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
++# You can also append an optional chroot string to the URLs to specify the
++# root directory for all kafka znodes.
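++# e.g. "127.0.0.1:2181/kafka" (where /kafka is an illustrative chroot path).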
++zookeeper.connect=localhost:2181
++
++# Timeout in ms for connecting to zookeeper
++zookeeper.connection.timeout.ms=6000
+diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties
+new file mode 100755
+index 0000000..7234e9c
+--- /dev/null
++++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties
+@@ -0,0 +1,34 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# the directory where the snapshot is stored.
++dataDir=/tmp/odf-embedded-test-kafka/zookeeper
++# the port at which the clients will connect
++clientPort=2181
++# disable the per-ip limit on the number of connections since this is a non-production config
++maxClientCnxns=0
++
+diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..5611c29
+--- /dev/null
++++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,18 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++## Use for tests only
++
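++# Each entry maps an ODF service interface to the implementation class that is
++# loaded in its place (mock implementations here, since this setup is for tests).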
++ODFConfigurationStorage=MockConfigurationStorage
++SparkServiceExecutor=MockSparkServiceExecutor
++NotificationManager=TestNotificationManager
+diff --git a/odf/odf-spark-example-application/.gitignore b/odf/odf-spark-example-application/.gitignore
+new file mode 100755
+index 0000000..a9e1d46
+--- /dev/null
++++ b/odf/odf-spark-example-application/.gitignore
+@@ -0,0 +1,7 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++.DS_Store
++/bin/
+diff --git a/odf/odf-spark-example-application/pom.xml b/odf/odf-spark-example-application/pom.xml
+new file mode 100755
+index 0000000..a2baa9e
+--- /dev/null
++++ b/odf/odf-spark-example-application/pom.xml
+@@ -0,0 +1,74 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-spark-example-application</artifactId>
++	<packaging>jar</packaging>
++	<name>odf-spark-example-application</name>
++	<build>
++		<plugins>
++			<plugin>
++				<artifactId>maven-compiler-plugin</artifactId>
++				<version>3.3</version>
++				<configuration>
++					<source>1.7</source>
++					<target>1.7</target>
++				</configuration>
++			</plugin>
++			<plugin>
++				<artifactId>maven-assembly-plugin</artifactId>
++				<executions>
++					<execution>
++						<phase>package</phase>
++						<goals>
++							<goal>single</goal>
++						</goals>
++					</execution>
++				</executions>
++				<configuration>
++					<descriptorRefs>
++						<descriptorRef>jar-with-dependencies</descriptorRef>
++					</descriptorRefs>
++				</configuration>
++			</plugin>
++		</plugins>
++
++	</build>
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-sql_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++		</dependency>
++		<dependency> <!-- Spark dependency -->
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-core_2.11</artifactId>
++			<version>2.1.0</version>
++			<scope>provided</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++	</dependencies>
++</project>
+diff --git a/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java
+new file mode 100755
+index 0000000..f5f7b70
+--- /dev/null
++++ b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java
+@@ -0,0 +1,57 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.spark;
++
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.spark.SparkDiscoveryServiceBase;
++import org.apache.spark.sql.Dataset;
++import org.apache.spark.sql.Row;
++
++import org.apache.atlas.odf.api.spark.SparkDiscoveryService;
++import org.apache.atlas.odf.api.spark.SparkUtils;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++
++public class SparkDiscoveryServiceExample extends SparkDiscoveryServiceBase implements SparkDiscoveryService {
++	static Logger logger = Logger.getLogger(SparkDiscoveryServiceExample.class.getName());
++
++	@Override
++	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
++		logger.log(Level.INFO, "Checking data set access.");
++		DataSetCheckResult checkResult = new DataSetCheckResult();
++		checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
++		Dataset<Row> df = SparkUtils.createDataFrame(this.spark, dataSetContainer, this.mds);
++		// Print first rows to check whether data frame can be accessed
++		df.show(10);
++		return checkResult;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
++		logger.log(Level.INFO, "Starting discovery service.");
++		Dataset<Row> df = SparkUtils.createDataFrame(spark, request.getDataSetContainer(), this.mds);
++		Map<String,Dataset<Row>> annotationDataFrameMap = SummaryStatistics.processDataFrame(this.spark, df, null);
++		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++		response.setCode(ResponseCode.OK);
++		response.setDetails("Discovery service successfully completed.");
++		response.setResult(SparkUtils.createAnnotationsFromDataFrameMap(request.getDataSetContainer(), annotationDataFrameMap, this.mds));
++		return response;
++	}
++}
+diff --git a/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java
+new file mode 100755
+index 0000000..a7d1542
+--- /dev/null
++++ b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java
+@@ -0,0 +1,112 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.spark;
++
++import org.apache.atlas.odf.api.spark.SparkUtils;
++import org.apache.spark.SparkFiles;
++
++import java.text.MessageFormat;
++import java.util.HashMap;
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.spark.sql.Column;
++import org.apache.spark.sql.Dataset;
++import org.apache.spark.sql.Row;
++import org.apache.spark.sql.SparkSession;
++
++public class SummaryStatistics {
++	static Logger logger = Logger.getLogger(SummaryStatistics.class.getName());
++	private static final String CSV_FILE_PARAMETER = "-dataFile=";
++	// The following constant is defined in class DiscoveryServiceSparkEndpoint but is duplicated here to avoid dependencies on the ODF code:
++	private static final String ANNOTATION_PROPERTY_COLUMN_NAME = "ODF_ANNOTATED_COLUMN";
++
++	// The main method is only available for testing purposes and is not called by ODF
++	public static void main(String[] args) {
++		logger.log(Level.INFO, "Running spark launcher with arguments: " + args[0]);
++		if ((args[0] == null) || (!args[0].startsWith(CSV_FILE_PARAMETER))) {
++			System.out.println(MessageFormat.format("Error: Spark Application Parameter '{0}' is missing.", CSV_FILE_PARAMETER));
++			System.exit(1);
++		}
++		String dataFilePath = SparkFiles.get(args[0].replace(CSV_FILE_PARAMETER, ""));
++		logger.log(Level.INFO, "Data file path is " + dataFilePath);
++
++		// Create Spark session
++		SparkSession spark = SparkSession.builder().master("local").appName("ODF Spark example application").getOrCreate();
++
++		// Read CSV file into data frame
++		Dataset<Row> df = spark.read()
++		    .format("com.databricks.spark.csv")
++		    .option("inferSchema", "true")
++		    .option("header", "true")
++		    .load(dataFilePath);
++
++		// Run actual job and print result
++		Map<String, Dataset<Row>> annotationDataFrameMap = null;
++		try {
++			annotationDataFrameMap = processDataFrame(spark, df, args);
++		} catch (Exception e) {
++			logger.log(Level.WARNING, MessageFormat.format("An error occurred while processing data set {0}:", args[0]), e);
++		} finally {
++			// Close and stop spark context
++			spark.close();
++			spark.stop();
++		}
++		if (annotationDataFrameMap == null) {
++			System.exit(1);
++		} else {
++			// Print all annotationDataFrames for all annotation types to stdout
++			for (Map.Entry<String, Dataset<Row>> entry : annotationDataFrameMap.entrySet()) {
++				logger.log(Level.INFO, "Result data frame for annotation type " + entry.getKey() + ":");
++				entry.getValue().show();
++			}
++		}
++	}
++
++	// The following method contains the actual implementation of the ODF Spark discovery service
++	public static Map<String,Dataset<Row>> processDataFrame(SparkSession spark, Dataset<Row> df, String[] args) {
++		logger.log(Level.INFO, "Started summary statistics Spark application.");
++		Map<String, Dataset<Row>> resultMap = new HashMap<String, Dataset<Row>>();
++
++		// Print input data set
++		df.show();
++
++		// Create column annotation data frame that contains basic data frame statistics
++		Dataset<Row> dfStatistics = df.describe();
++
++		// Rename "summary" column to ANNOTATION_PROPERTY_COLUMN_NAME
++		String[] columnNames = dfStatistics.columns();
++		columnNames[0] = ANNOTATION_PROPERTY_COLUMN_NAME;
++		Dataset<Row> summaryStatistics =  dfStatistics.toDF(columnNames);
++		summaryStatistics.show();
++		String columnAnnotationTypeName = "SparkSummaryStatisticsAnnotation";
++
++		// Transpose table to turn it into format required by ODF
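++		// Illustration (shape only, column and value names are made up): a describe() output such as
++		//   ODF_ANNOTATED_COLUMN | col1 | col2              ODF_ANNOTATED_COLUMN | count | mean | ...
++		//   count                |  100 |  100   becomes    col1                 |   100 | 12.3 | ...
++		//   mean                 | 12.3 |  4.5              col2                 |   100 |  4.5 | ...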
++		Dataset<Row> columnAnnotationDataFrame = SparkUtils.transposeDataFrame(spark, summaryStatistics);
++		columnAnnotationDataFrame.show();
++
++		// Create table annotation that contains the data frame's row count (the "count" statistic of the first column)
++		String tableAnnotationTypeName = "SparkTableAnnotation";
++		Dataset<Row> tableAnnotationDataFrame = columnAnnotationDataFrame.select(new Column("count")).limit(1);
++		tableAnnotationDataFrame.show();
++
++		// Add annotation data frames to result map
++		resultMap.put(columnAnnotationTypeName, columnAnnotationDataFrame);
++		resultMap.put(tableAnnotationTypeName, tableAnnotationDataFrame);
++
++		logger.log(Level.INFO, "Spark job finished.");
++		return resultMap;
++	}
++}
+diff --git a/odf/odf-spark/.gitignore b/odf/odf-spark/.gitignore
+new file mode 100755
+index 0000000..b2f4a98
+--- /dev/null
++++ b/odf/odf-spark/.gitignore
+@@ -0,0 +1,6 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++.DS_Store
+diff --git a/odf/odf-spark/pom.xml b/odf/odf-spark/pom.xml
+new file mode 100755
+index 0000000..378f280
+--- /dev/null
++++ b/odf/odf-spark/pom.xml
+@@ -0,0 +1,242 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-spark</artifactId>
++	<packaging>jar</packaging>
++	<name>odf-spark</name>
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-store</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>test</scope>
++		</dependency>
++		<!-- Workaround: Add odf-spark-example-application because dynamic jar loading does not seem to work on the IBM JDK -->
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-spark-example-application</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-launcher_2.11</artifactId>
++			<version>2.1.0</version>
++		</dependency>
++		<dependency>
++			<groupId>commons-io</groupId>
++			<artifactId>commons-io</artifactId>
++			<version>2.4</version>
++		</dependency>
++		<!-- The following Spark dependencies are needed for testing only. -->
++		<!-- Nevertheless, they have to be added as compile dependencies in order to become available to the SDPFactory. -->
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-core_2.11</artifactId>
++			<version>2.1.0</version>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.spark</groupId>
++			<artifactId>spark-sql_2.11</artifactId>
++			<version>2.1.0</version>
++			<exclusions>
++				<exclusion>
++					<groupId>commons-codec</groupId>
++					<artifactId>commons-codec</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++	</dependencies>
++	<build>
++		<resources>
++			<resource>
++				<directory>${project.build.directory}/downloads</directory>
++			</resource>
++		</resources>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-jar-plugin</artifactId>
++				<version>2.6</version>
++				<executions>
++					<execution>
++						<goals>
++							<goal>test-jar</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++						<odf.build.project.name>${project.name}</odf.build.project.name>
++					</systemPropertyVariables>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-dependency-plugin</artifactId>
++				<version>2.4</version>
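++				<!-- Note: together with the downloads resource directory configured above, the
++				     artifacts copied below (during the validate phase) end up packaged into the
++				     odf-spark jar under META-INF/spark. -->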
++				<executions>
++					<execution>
++						<id>download-jar-file</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>copy</goal>
++						</goals>
++						<configuration>
++							<artifactItems>
++								<artifactItem>
++									<groupId>org.apache.atlas.odf</groupId>
++									<artifactId>odf-api</artifactId>
++									<version>1.2.0-SNAPSHOT</version>
++									<type>jar</type>
++									<overWrite>true</overWrite>
++									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
++								</artifactItem>
++								<artifactItem>
++									<groupId>org.apache.atlas.odf</groupId>
++									<artifactId>odf-spark-example-application</artifactId>
++									<version>1.2.0-SNAPSHOT</version>
++									<type>jar</type>
++									<overWrite>true</overWrite>
++									<outputDirectory>/tmp/odf-spark</outputDirectory>
++								</artifactItem>
++								<artifactItem>
++									<groupId>org.apache.atlas.odf</groupId>
++									<artifactId>odf-spark-example-application</artifactId>
++									<version>1.2.0-SNAPSHOT</version>
++									<type>jar</type>
++									<overWrite>true</overWrite>
++									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
++								</artifactItem>
++								<artifactItem>
++									<groupId>org.apache.wink</groupId>
++									<artifactId>wink-json4j</artifactId>
++									<version>1.4</version>
++									<type>jar</type>
++									<overWrite>true</overWrite>
++									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
++								</artifactItem>
++							</artifactItems>
++							<includes>**/*</includes>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++
++	<profiles>
++		<profile>
++			<id>integration-tests</id>
++			<activation>
++				<property>
++					<name>reduced-tests</name>
++					<value>!true</value>
++				</property>
++			</activation>
++			<build>
++				<plugins>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-failsafe-plugin</artifactId>
++						<version>2.19</version>
++						<configuration>
++							<systemPropertyVariables>
++								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++								<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
++							</systemPropertyVariables>
++							<dependenciesToScan>
++								<dependency>org.apache.atlas.odf:odf-core</dependency>
++							</dependenciesToScan>
++							<includes>
++								<include>**/integrationtest/**/SparkDiscoveryServiceLocalTest.java</include>
++							</includes>
++						</configuration>
++						<executions>
++							<execution>
++								<id>integration-test</id>
++								<goals>
++									<goal>integration-test</goal>
++								</goals>
++							</execution>
++							<execution>
++								<id>verify</id>
++								<goals>
++									<goal>verify</goal>
++								</goals>
++							</execution>
++						</executions>
++					</plugin>
++				</plugins>
++			</build>
++		</profile>
++	</profiles>
++
++</project>
+diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java
+new file mode 100755
+index 0000000..84ae80c
+--- /dev/null
++++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java
+@@ -0,0 +1,154 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.spark;
++
++import java.io.PrintWriter;
++import java.io.StringWriter;
++import java.lang.reflect.Constructor;
++import java.text.MessageFormat;
++import java.util.Map;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.spark.SparkDiscoveryService;
++import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
++import org.apache.spark.sql.Dataset;
++import org.apache.spark.sql.Row;
++import org.apache.spark.sql.SparkSession;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.spark.SparkUtils;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ * This class calls the actual Spark discovery services depending on the type of interface they implement.
++ * It is used to run a Spark discovery service either on a local Spark cluster ({@link SparkServiceExecutorImpl})
++ * or on a remote Spark cluster ({@link SparkApplicationStub}).
++ */
++public class LocalSparkServiceExecutor implements SparkServiceExecutor {
++	private Logger logger = Logger.getLogger(LocalSparkServiceExecutor.class.getName());
++	private SparkSession spark;
++	private MetadataStore mds;
++
++	void setSparkSession(SparkSession spark) {
++		this.spark = spark;
++	}
++
++	void setMetadataStore(MetadataStore mds) {
++		this.mds = mds;
++	}
++
++	@Override
++	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsProp, DataSetContainer container) {
++		DiscoveryServiceSparkEndpoint endpoint;
++		try {
++			endpoint = JSONUtils.convert(dsProp.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++		} catch (JSONException e1) {
++			throw new RuntimeException(e1);
++		}
++		DataSetCheckResult checkResult = new DataSetCheckResult();
++		try {
++			SERVICE_INTERFACE_TYPE inputMethod = endpoint.getInputMethod();
++			if (inputMethod.equals(SERVICE_INTERFACE_TYPE.DataFrame)) {
++				MetaDataObject dataSet = container.getDataSet();
++				if (!(dataSet instanceof RelationalDataSet)) {
++					checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
++					checkResult.setDetails("This service can only process relational data sets.");
++				} else {
++					checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
++					Dataset<Row> df = SparkUtils.createDataFrame(this.spark, container, this.mds);
++					// Print first rows to check whether data frame can be accessed
++					df.show(10);
++				}
++			} else if (inputMethod.equals(SERVICE_INTERFACE_TYPE.Generic)) {
++				Class<?> clazz = Class.forName(endpoint.getClassName());
++				Constructor<?> cons = clazz.getConstructor();
++				SparkDiscoveryService service = (SparkDiscoveryService) cons.newInstance();
++				service.setMetadataStore(this.mds);
++				service.setSparkSession(this.spark);
++				checkResult = service.checkDataSet(container);
++			} else {
++				throw new RuntimeException(MessageFormat.format("Unsupported interface type {0}.", inputMethod));
++			}
++		} catch (Exception e) {
++			logger.log(Level.WARNING,"Access to data set not possible.", e);
++			checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
++			checkResult.setDetails(getExceptionAsString(e));
++		} finally {
++			this.spark.close();
++			this.spark.stop();
++		}
++		return checkResult;
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsProp, DiscoveryServiceRequest request) {
++		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
++		response.setDetails("Annotations created successfully");
++		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
++		try {
++			DiscoveryServiceSparkEndpoint endpoint = JSONUtils.convert(dsProp.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++			Class<?> clazz = Class.forName(endpoint.getClassName());
++			DataSetContainer container = request.getDataSetContainer();
++			String[] optionalArgs = {}; // For future use
++			SERVICE_INTERFACE_TYPE inputMethod = endpoint.getInputMethod();
++
++			if (inputMethod.equals(SERVICE_INTERFACE_TYPE.DataFrame)) {
++				if (!(container.getDataSet() instanceof RelationalDataSet)) {
++					throw new RuntimeException("This service can only process relational data sets (DataFile or Table).");
++				}
++				Dataset<Row> df = SparkUtils.createDataFrame(this.spark, container, this.mds);
++				@SuppressWarnings("unchecked")
++				Map<String, Dataset<Row>> annotationDataFrameMap = (Map<String, Dataset<Row>>) clazz.getMethod("processDataFrame", SparkSession.class, Dataset.class, String[].class).invoke(null, this.spark, df, (Object[]) optionalArgs);
++				response.setResult(SparkUtils.createAnnotationsFromDataFrameMap(container, annotationDataFrameMap, this.mds));
++			} else if (inputMethod.equals(SERVICE_INTERFACE_TYPE.Generic)) {
++				Constructor<?> cons = clazz.getConstructor();
++				SparkDiscoveryService service = (SparkDiscoveryService) cons.newInstance();
++				service.setMetadataStore(this.mds);
++				service.setSparkSession(this.spark);
++				response = service.runAnalysis(request);
++			} else {
++				throw new RuntimeException(MessageFormat.format("Unsupported interface type {0}.", inputMethod));
++			}
++		} catch(Exception e) {
++			logger.log(Level.WARNING,"Error running discovery service.", e);
++			response.setDetails(getExceptionAsString(e));
++			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
++		} finally {
++			this.spark.close();
++			this.spark.stop();
++		}
++		return response;
++	}
++
++	public static String getExceptionAsString(Throwable exc) {
++		StringWriter sw = new StringWriter();
++		PrintWriter pw = new PrintWriter(sw);
++		exc.printStackTrace(pw);
++		String st = sw.toString();
++		return st;
++	}
++}
+diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java
+new file mode 100755
+index 0000000..81fea2c
+--- /dev/null
++++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java
+@@ -0,0 +1,107 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.spark;
++
++import java.io.File;
++import java.io.FileOutputStream;
++import java.io.IOException;
++import java.io.InputStream;
++import java.net.MalformedURLException;
++import java.net.URL;
++import java.text.MessageFormat;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.commons.io.FileUtils;
++import org.apache.commons.io.IOUtils;
++
++import org.apache.atlas.odf.core.Utils;
++
++public class SparkJars {
++	private static Logger logger = Logger.getLogger(SparkJars.class.getName());
++
++	public String getResourceAsJarFile(String resource) {
++		ClassLoader cl = this.getClass().getClassLoader();
++		InputStream inputStream = cl.getResourceAsStream(resource);
++		if (inputStream == null) {
++        	String msg = MessageFormat.format("Resource {0} was not found.", resource);
++        	logger.log(Level.WARNING, msg);
++        	throw new RuntimeException(msg);
++		}
++		String tempFilePath = null;
++		try {
++			File tempFile = File.createTempFile("driver", "jar");
++			tempFilePath = tempFile.getAbsolutePath();
++			logger.log(Level.INFO, "Creating temporary file " + tempFilePath);
++			// Close the output stream explicitly so that the file handle is not leaked
++			FileOutputStream outputStream = new FileOutputStream(tempFile);
++			IOUtils.copy(inputStream, outputStream);
++			outputStream.close();
++			inputStream.close();
++			Utils.runSystemCommand("chmod 755 " + tempFilePath);
++		} catch (IOException e) {
++        	String msg = MessageFormat.format("Error creating temporary file from resource {0}: ", resource);
++        	logger.log(Level.WARNING, msg, e);
++        	throw new RuntimeException(msg + Utils.getExceptionAsString(e));
++		}
++		return tempFilePath;
++	}
++
++	public String getUrlasJarFile(String urlString) {
++		try {
++		    File tempFile = File.createTempFile("driver", "jar");
++	    	logger.log(Level.INFO, "Creating temporary file " + tempFile);
++			FileUtils.copyURLToFile(new URL(urlString), tempFile);
++			Utils.runSystemCommand("chmod 755 " + tempFile.getAbsolutePath());
++			return tempFile.getAbsolutePath();
++		} catch (MalformedURLException e) {
++			String msg = MessageFormat.format("An invalid Spark application URL {0} was provided: ", urlString);
++			logger.log(Level.WARNING, msg, e);
++			throw new RuntimeException(msg + Utils.getExceptionAsString(e));
++		} catch (IOException e) {
++			logger.log(Level.WARNING, "Error processing Spark application jar file.", e);
++			throw new RuntimeException("Error processing Spark application jar file: " + Utils.getExceptionAsString(e));
++		}
++	}
++
++	public byte[] getFileAsByteArray(String resourceOrURL) {
++        try {
++        	InputStream inputStream;
++        	if (isValidUrl(resourceOrURL)) {
++            	inputStream = new URL(resourceOrURL).openStream();
++        	} else {
++        		ClassLoader cl = this.getClass().getClassLoader();
++        		inputStream = cl.getResourceAsStream(resourceOrURL);
++        		if (inputStream == null) {
++                	String msg = MessageFormat.format("Resource {0} was not found.", resourceOrURL);
++                	logger.log(Level.WARNING, msg);
++                	throw new RuntimeException(msg);
++        		}
++        	}
++        	byte[] bytes = IOUtils.toByteArray(inputStream);
++        	return bytes;
++        } catch (IOException e) {
++        	String msg = MessageFormat.format("Error converting jar file {0} into byte array: ", resourceOrURL);
++        	logger.log(Level.WARNING, msg, e);
++        	throw new RuntimeException(msg + Utils.getExceptionAsString(e));
++        }
++	}
++
++	public static boolean isValidUrl(String urlString) {
++		try {
++			new URL(urlString);
++			return true;
++		} catch (java.net.MalformedURLException exc) {
++			// Expected exception if URL is not valid
++			return false;
++		}
++	}
++}
+diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java
+new file mode 100755
+index 0000000..720343b
+--- /dev/null
++++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java
+@@ -0,0 +1,102 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.spark;
++
++import java.lang.reflect.Method;
++import java.net.URL;
++import java.net.URLClassLoader;
++import java.text.MessageFormat;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
++import org.apache.spark.sql.SparkSession;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
++import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
++import org.apache.atlas.odf.api.settings.SparkConfig;
++import org.apache.atlas.odf.json.JSONUtils;
++
++/**
++ * Calls the appropriate implementation (local vs. remote) of the {@link SparkServiceExecutor} depending on the current {@link SparkConfig}.
++ * Prepares the local Spark cluster to be used in unit and integration tests.
++ */
++public class SparkServiceExecutorImpl implements SparkServiceExecutor {
++	private Logger logger = Logger.getLogger(SparkServiceExecutorImpl.class.getName());
++
++	@Override
++	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer) {
++		return this.getExecutor(dsri).checkDataSet(dsri, dataSetContainer);
++	}
++
++	@Override
++	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request) {
++		return this.getExecutor(dsri).runAnalysis(dsri, request);
++	}
++
++	private SparkServiceExecutor getExecutor(DiscoveryServiceProperties dsri) {
++		SettingsManager config = new ODFFactory().create().getSettingsManager();
++		DiscoveryServiceSparkEndpoint endpoint;
++		try {
++			endpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
++		} catch (JSONException e1) {
++			throw new RuntimeException(e1);
++		}
++
++		SparkConfig sparkConfig = config.getODFSettings().getSparkConfig();
++		if (sparkConfig == null) {
++			String msg = "No Spark service is configured. Please manually register Spark service or bind a Spark service to your ODF Bluemix app.";
++			logger.log(Level.SEVERE, msg);
++			throw new RuntimeException(msg);
++		} else {
++			logger.log(Level.INFO, "Using local Spark cluster {0}.", sparkConfig.getClusterMasterUrl());
++			SparkSession spark = SparkSession.builder().master(sparkConfig.getClusterMasterUrl()).appName(dsri.getName()).getOrCreate();
++			SparkJars sparkJars = new SparkJars();
++			try {
++			    // Load jar file containing the Spark job to be started
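++			    // Note: this reflective addURL call extends the system class path at runtime. It relies on the
++			    // system class loader being a URLClassLoader, which holds on Java 8 but not on Java 9 and later.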
++			    URLClassLoader classLoader = (URLClassLoader)ClassLoader.getSystemClassLoader();
++				Method method = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
++			    method.setAccessible(true);
++			    String applicationJarFile;
++				if (SparkJars.isValidUrl(endpoint.getJar())) {
++					applicationJarFile = sparkJars.getUrlasJarFile(endpoint.getJar());
++				} else {
++					applicationJarFile = sparkJars.getResourceAsJarFile(endpoint.getJar());
++				}
++				logger.log(Level.INFO, "Using application jar file {0}.", applicationJarFile);
++			    method.invoke(classLoader, new URL("file:" + applicationJarFile));
++			} catch (Exception e) {
++				String msg = MessageFormat.format("Error loading jar file {0} implementing the Spark discovery service: ", endpoint.getJar());
++				logger.log(Level.WARNING, msg, e);
++				spark.close();
++				spark.stop();
++				throw new RuntimeException(msg, e);
++			}
++			LocalSparkServiceExecutor executor = new LocalSparkServiceExecutor();
++			executor.setSparkSession(spark);
++			executor.setMetadataStore(new ODFFactory().create().getMetadataStore());
++		    return executor;
++		}
++	}
++}
+diff --git a/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..d6651ee
+--- /dev/null
++++ b/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,14 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++SparkServiceExecutor=org.apache.atlas.odf.core.spark.SparkServiceExecutorImpl
+diff --git a/odf/odf-store/.gitignore b/odf/odf-store/.gitignore
+new file mode 100755
+index 0000000..ea5ddb8
+--- /dev/null
++++ b/odf/odf-store/.gitignore
+@@ -0,0 +1,5 @@
++.settings
++target
++.classpath
++.project
++.factorypath
+\ No newline at end of file
+diff --git a/odf/odf-store/pom.xml b/odf/odf-store/pom.xml
+new file mode 100755
+index 0000000..3d0a93d
+--- /dev/null
++++ b/odf/odf-store/pom.xml
+@@ -0,0 +1,87 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-store</artifactId>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.zookeeper</groupId>
++			<artifactId>zookeeper</artifactId>
++			<version>3.4.6</version>
++			<scope>compile</scope>
++		</dependency>
++
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++	</dependencies>
++
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++						<odf.build.project.name>${project.name}</odf.build.project.name>
++					</systemPropertyVariables>
++					<dependenciesToScan>
++						<dependency>org.apache.atlas.odf:odf-core</dependency>
++					</dependenciesToScan>
++					<includes>
++					    <include>**/configuration/**/*.java</include>
++						<include>**/ZookeeperConfigurationStorageTest.java</include>
++					</includes>
++				</configuration>
++			</plugin>
++		</plugins>
++	</build>
++
++</project>
+diff --git a/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java b/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java
+new file mode 100755
+index 0000000..3ea9927
+--- /dev/null
++++ b/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java
+@@ -0,0 +1,247 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.store.zookeeper34;
++
++import java.io.IOException;
++import java.io.UnsupportedEncodingException;
++import java.text.MessageFormat;
++import java.util.HashSet;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.TimeUnit;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.core.Environment;
++import org.apache.atlas.odf.core.ODFInternalFactory;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++import org.apache.zookeeper.CreateMode;
++import org.apache.zookeeper.KeeperException;
++import org.apache.zookeeper.KeeperException.Code;
++import org.apache.zookeeper.KeeperException.NodeExistsException;
++import org.apache.zookeeper.WatchedEvent;
++import org.apache.zookeeper.Watcher;
++import org.apache.zookeeper.ZooDefs.Ids;
++import org.apache.zookeeper.ZooKeeper;
++import org.apache.zookeeper.data.Stat;
++
++import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
++
++public class ZookeeperConfigurationStorage implements ODFConfigurationStorage {
++	private Logger logger = Logger.getLogger(ZookeeperConfigurationStorage.class.getName());
++	static final String ZOOKEEPER_CONFIG_PATH = "/odf/config";
++	static String configCache = null; // cache is a string so that the object is not accidentally modified
++	static Object configCacheLock = new Object();
++	static HashSet<String> pendingConfigChanges = new HashSet<String>();
++
++	String zookeeperString;
++
++	public ZookeeperConfigurationStorage() {
++		zookeeperString = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
++	}
++
++	public void clearCache() {
++		synchronized (configCacheLock) {
++			configCache = null;
++		}
++	}
++	
++	@Override
++	public void storeConfig(ConfigContainer config) {
++		synchronized (configCacheLock) {
++			ZooKeeper zk = null;
++			String configTxt = null;
++			try {
++				configTxt = JSONUtils.toJSON(config);
++				zk = getZkConnectionSynchronously();
++				if (zk.exists(getZookeeperConfigPath(), false) == null) {
++					//config file doesn't exist in zookeeper yet, write default config
++					logger.log(Level.WARNING, "Zookeeper config not found - creating it before writing: {0}", configTxt);
++					initializeConfiguration(zk, configTxt);
++				}
++				zk.setData(getZookeeperConfigPath(), configTxt.getBytes("UTF-8"), -1);
++				configCache = configTxt;
++			} catch (InterruptedException e) {
++				e.printStackTrace();
++				throw new RuntimeException("A zookeeper connection could not be established in time to write settings");
++			} catch (KeeperException e) {
++				if (Code.NONODE.equals(e.code())) {
++					logger.info("Setting could not be written, the required node is not available!");
++					initializeConfiguration(zk, configTxt);
++					return;
++				}
++				// This should never happen: only NoNode or BadVersion codes are possible here, and since the file version is ignored, a BadVersion should never occur.
++				throw new RuntimeException("Settings could not be written because of an unexpected Zookeeper exception", e);
++			} catch (UnsupportedEncodingException e) {
++				throw new RuntimeException("Settings could not be written because of an unsupported encoding", e);
++			} catch (JSONException e) {
++				throw new RuntimeException("Configuration is not valid", e);
++			} finally {
++				if (zk != null) {
++					try {
++						zk.close();
++					} catch (InterruptedException e) {
++						e.printStackTrace();
++					}
++				}
++			}
++		}
++	}
++
++	@Override
++	public ConfigContainer getConfig(ConfigContainer defaultConfiguration) {
++		synchronized (configCacheLock) {
++			if (configCache == null) {
++				ZooKeeper zk = getZkConnectionSynchronously();
++				try {
++					if (zk.exists(getZookeeperConfigPath(), false) == null) {
++						//config file doesn't exist in zookeeper yet, write default config
++						String defaultConfigString = JSONUtils.toJSON(defaultConfiguration);
++						logger.log(Level.WARNING, "Zookeeper config not found - creating now with default: {0}", defaultConfigString);
++						initializeConfiguration(zk, defaultConfigString);
++					}
++					byte[] configBytes = zk.getData(getZookeeperConfigPath(), true, new Stat());
++					if (configBytes != null) {
++						String configString = new String(configBytes, "UTF-8");
++						configCache = configString;
++					} else {
++						// should never happen
++						throw new RuntimeException("Zookeeper configuration was not stored");
++					}
++				} catch (KeeperException e) {
++					throw new RuntimeException(MessageFormat.format("Zookeeper config could not be read, a Zookeeper exception of type {0} occurred!", e.code().name()), e);
++				} catch (InterruptedException e) {
++					throw new RuntimeException("Zookeeper config could not be read, the connection was interrupted", e);
++				} catch (IOException | JSONException e) {
++					throw new RuntimeException("Zookeeper config could not be read, the file could not be parsed correctly", e);
++				} finally {
++					if (zk != null) {
++						try {
++							zk.close();
++						} catch (InterruptedException e) {
++							e.printStackTrace();
++						}
++
++					}
++				}
++
++			}
++			try {
++				return JSONUtils.fromJSON(configCache, ConfigContainer.class);
++			} catch (JSONException e) {
++				throw new RuntimeException("Cached configuration was not valid", e);
++			}
++		}
++	}
++
++	private void initializeConfiguration(ZooKeeper zk, String config) {
++		try {
++			if (getZookeeperConfigPath().contains("/")) {
++				String[] nodes = getZookeeperConfigPath().split("/");
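++				// create every node along the path one by one, e.g. for the default path /odf/config first /odf, then /odf/config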
++				StringBuilder path = new StringBuilder();
++				for (String node : nodes) {
++					if (node.trim().equals("")) {
++						//ignore empty paths
++						continue;
++					}
++					path.append("/" + node);
++					try {
++						zk.create(path.toString(), new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
++					} catch (NodeExistsException ex) {
++						//ignore if node already exists and continue with next node
++					}
++				}
++			}
++
++			//use version -1 to ignore versioning conflicts
++			try {
++				zk.setData(getZookeeperConfigPath(), config.toString().getBytes("UTF-8"), -1);
++			} catch (UnsupportedEncodingException e) {
++				// should not happen
++				throw new RuntimeException(e);
++			}
++		} catch (KeeperException e) {
++			throw new RuntimeException(MessageFormat.format("The zookeeper config could not be initialized, a Zookeeper exception of type {0} occured!", e.code().name()), e);
++		} catch (InterruptedException e) {
++			throw new RuntimeException("The zookeeper config could not be initialized, the connection got interrupted!", e);
++		}
++	}
++
++	private ZooKeeper getZkConnectionSynchronously() {
++		final CountDownLatch latch = new CountDownLatch(1);
++		logger.log(Level.FINE, "Trying to connect to zookeeper at {0}", zookeeperString);
++		ZooKeeper zk = null;
++		try {
++			int timeout = 5;
++			zk = new ZooKeeper(zookeeperString, timeout * 1000, new Watcher() {
++
++				@Override
++				public void process(WatchedEvent event) {
++					if (event.getState().equals(Watcher.Event.KeeperState.ConnectedReadOnly) || event.getState().equals(Watcher.Event.KeeperState.SyncConnected)) {
++						//count down latch, connected successfully to zk
++						latch.countDown();
++					}
++				}
++			});
++			//block the thread until the latch counts down, waiting at most 5 * timeout seconds
++			latch.await(5 * timeout, TimeUnit.SECONDS);
++			if (latch.getCount() > 0) {
++				zk.close();
++				throw new RuntimeException("The zookeeper connection could not be retrieved on time!");
++			}
++			return zk;
++		} catch (IOException e1) {
++			throw new RuntimeException("The zookeeper connection could not be retrieved, the connection failed!", e1);
++		} catch (InterruptedException e) {
++			throw new RuntimeException("Zookeeper connection could not be retrieved, the thread was interrupted!", e);
++		}
++	}
++
++	public String getZookeeperConfigPath() {
++		return ZOOKEEPER_CONFIG_PATH;
++	}
++
++	@Override
++	public void onConfigChange(ConfigContainer container) {
++		synchronized (configCacheLock) {
++			try {
++				configCache = JSONUtils.toJSON(container);
++			} catch (JSONException e) {
++				throw new RuntimeException("Config could not be cloned!", e);
++			}
++		}
++	}
++
++	@Override
++	public void addPendingConfigChange(String changeId) {
++		synchronized (configCacheLock) {
++			pendingConfigChanges.add(changeId);
++		}
++	}
++
++	@Override
++	public void removePendingConfigChange(String changeId) {
++		synchronized (configCacheLock) {
++			pendingConfigChanges.remove(changeId);
++		}
++	}
++
++	@Override
++	public boolean isConfigChangePending(String changeId) {
++		synchronized (configCacheLock) {
++			return pendingConfigChanges.contains(changeId);
++		}
++	}
++}
+diff --git a/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties b/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties
+new file mode 100755
+index 0000000..7234e9c
+--- /dev/null
++++ b/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties
+@@ -0,0 +1,20 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# the directory where the snapshot is stored.
++dataDir=/tmp/odf-embedded-test-kafka/zookeeper
++# the port at which the clients will connect
++clientPort=2181
++# disable the per-ip limit on the number of connections since this is a non-production config
++maxClientCnxns=0
++
+diff --git a/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..65a7b5d
+--- /dev/null
++++ b/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,14 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++ODFConfigurationStorage=org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage
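The single line above binds ODF's ODFConfigurationStorage extension point to the ZooKeeper-backed implementation. How the ODF core resolves this binding is not shown in this hunk, so the following is only a plausible sketch of a classpath-properties lookup; everything except the properties file path and the key is invented for illustration:

    import java.io.InputStream;
    import java.util.Properties;

    public class ImplementationLookup {
        public static void main(String[] args) throws Exception {
            Properties bindings = new Properties();
            try (InputStream in = ImplementationLookup.class.getClassLoader()
                    .getResourceAsStream("org/apache/atlas/odf/odf-implementation.properties")) {
                bindings.load(in);
            }
            // resolves to org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage
            String impl = bindings.getProperty("ODFConfigurationStorage");
            Object storage = Class.forName(impl).newInstance();
            System.out.println("Loaded implementation: " + storage.getClass().getName());
        }
    }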
+diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
+new file mode 100755
+index 0000000..9650bd6
+--- /dev/null
++++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
+@@ -0,0 +1,181 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.store.zookeeper34.test;
++
++import java.io.File;
++import java.io.FileNotFoundException;
++import java.io.IOException;
++import java.net.BindException;
++import java.net.DatagramSocket;
++import java.net.ServerSocket;
++import java.rmi.NotBoundException;
++import java.util.Properties;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.zookeeper.ZooKeeper;
++import org.apache.zookeeper.ZooKeeper.States;
++import org.apache.zookeeper.server.ServerConfig;
++import org.apache.zookeeper.server.ZooKeeperServerMain;
++import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
++
++import org.apache.atlas.odf.core.Utils;
++
++public class TestZookeeper {
++
++	public TestZookeeper() {
++	}
++
++	public void start() {
++		try {
++			startZookeeper();
++		} catch (Exception e) {
++			e.printStackTrace();
++			throw new RuntimeException(e);
++		}
++	}
++
++	public static boolean deleteRecursive(File path) throws FileNotFoundException {
++		if (!path.exists()) {
++			throw new FileNotFoundException(path.getAbsolutePath());
++		}
++		boolean ret = true;
++		if (path.isDirectory()) {
++			for (File f : path.listFiles()) {
++				ret = ret && deleteRecursive(f);
++			}
++		}
++		return ret && path.delete();
++	}
++
++	static Thread zookeeperThread = null;
++	static Object lockObject = new Object();
++	static ZooKeeperServerMainWithShutdown zooKeeperServer = null;
++
++	boolean cleanData = true; // all data is cleaned at server start !!
++
++	Logger logger = Logger.getLogger(TestZookeeper.class.getName());
++
++	void log(String s) {
++		logger.info(s);
++	}
++
++	int zookeeperStartupTime = 10000;
++
++	static class ZooKeeperServerMainWithShutdown extends ZooKeeperServerMain {
++		public void shutdown() {
++			super.shutdown();
++		}
++	}
++
++	private void startZookeeper() throws Exception {
++		log("Starting zookeeper");
++
++		final Properties zkProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties");
++		log("zookeeper properties: " + zkProps);
++		if (cleanData) {
++			String dataDir = zkProps.getProperty("dataDir");
++			log("Removing all data from zookeeper data dir " + dataDir);
++			File dir = new File(dataDir);
++			if (dir.exists()) {
++				if (!deleteRecursive(dir)) {
++					throw new IOException("Could not delete directory " + dataDir);
++				}
++			}
++		}
++		final ZooKeeperServerMainWithShutdown zk = new ZooKeeperServerMainWithShutdown();
++		final ServerConfig serverConfig = new ServerConfig();
++		log("Loading zookeeper config...");
++		QuorumPeerConfig zkConfig = new QuorumPeerConfig();
++		zkConfig.parseProperties(zkProps);
++		serverConfig.readFrom(zkConfig);
++		final String zkPort = (String) zkProps.get("clientPort");
++
++		Runnable zookeeperStarter = new Runnable() {
++
++			@Override
++			public void run() {
++				try {
++					log("Now starting Zookeeper with API...");
++					zk.runFromConfig(serverConfig);
++				} catch (BindException ex) {
++					log("Embedded zookeeper could not be started, port is already in use. Trying to use external zookeeper");
++					ZooKeeper zK = null;
++					try {
++						zK = new ZooKeeper("localhost:" + zkPort, 5000, null);
++						if (zK.getState().equals(States.CONNECTED)) {
++							log("Using existing zookeeper running on port " + zkPort);
++							return;
++						} else {
++							throw new NotBoundException();
++						}
++					} catch (Exception zkEx) {
++						throw new RuntimeException("Could not connect to zookeeper on port " + zkPort + ". Please close all applications listening on this port.", zkEx);
++					} finally {
++						if (zK != null) {
++							try {
++								zK.close();
++							} catch (InterruptedException e) {
++								logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
++							}
++						}
++					}
++				} catch (Exception e) {
++					e.printStackTrace();
++					throw new RuntimeException(e);
++				}
++
++			}
++		};
++
++		zookeeperThread = new Thread(zookeeperStarter);
++		zookeeperThread.setDaemon(true);
++		zookeeperThread.start();
++		log("Zookeeper start initiated, waiting " + (zookeeperStartupTime / 1000) + "s...");
++		Thread.sleep(zookeeperStartupTime);
++		zooKeeperServer = zk;
++		log("Zookeeper started");
++
++	}
++
++	public boolean isRunning() {
++		return zooKeeperServer != null;
++	}
++
++	boolean isPortAvailable(int port) {
++		ServerSocket ss = null;
++		DatagramSocket ds = null;
++		try {
++			ss = new ServerSocket(port);
++			ss.setReuseAddress(true);
++			ds = new DatagramSocket(port);
++			ds.setReuseAddress(true);
++			return true;
++		} catch (IOException e) {
++		} finally {
++			if (ds != null) {
++				ds.close();
++			}
++
++			if (ss != null) {
++				try {
++					ss.close();
++				} catch (IOException e) {
++				}
++			}
++		}
++
++		return false;
++	}
++}
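Both getZkConnectionSynchronously() in ZookeeperConfigurationStorage and the external-ZooKeeper fallback above depend on the asynchronous nature of the ZooKeeper client: the constructor returns before the session is established. The connect-and-wait pattern they use can be distilled into this self-contained sketch (ZooKeeper 3.4 API; class and method names are illustrative):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkSyncConnect {
        // Returns a connected client or throws if the session is not up in time.
        public static ZooKeeper connect(String connectString, int timeoutSeconds) throws Exception {
            final CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper(connectString, timeoutSeconds * 1000, new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    if (event.getState() == Event.KeeperState.SyncConnected
                            || event.getState() == Event.KeeperState.ConnectedReadOnly) {
                        connected.countDown(); // session established
                    }
                }
            });
            if (!connected.await(5L * timeoutSeconds, TimeUnit.SECONDS)) {
                zk.close();
                throw new RuntimeException("ZooKeeper connection could not be established in time");
            }
            return zk;
        }
    }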
+diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
+new file mode 100755
+index 0000000..1db55f2
+--- /dev/null
++++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
+@@ -0,0 +1,54 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.core.store.zookeeper34.test;
++
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.core.configuration.ConfigContainer;
++import org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++/**
++ * this test uses the real storage implementation therefore a zookeeper is required
++ */
++public class ZookeeperConfigurationStorageTest {
++	@BeforeClass
++	public static void setup() {
++		new TestZookeeper().start();
++	}
++
++	@Test
++	public void testStoreInZookeeper() {
++		ZookeeperConfigurationStorage store = new ZookeeperConfigurationStorage() {
++
++			@Override
++			public String getZookeeperConfigPath() {
++				return "/odf/testconfig";
++			}
++			
++		};
++		ConfigContainer container = new ConfigContainer();
++		ODFSettings odfConfig = new ODFSettings();
++		String instanceId = "my_test_id";
++		odfConfig.setInstanceId(instanceId);
++		container.setOdf(odfConfig);
++		store.storeConfig(container);
++
++		ConfigContainer updatedContainer = store.getConfig(null);
++		Assert.assertEquals(instanceId, updatedContainer.getOdf().getInstanceId());
++		store.clearCache();
++		
++	}
++}
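To double-check what the store actually persisted, the JSON written by storeConfig() can be read back with a raw client. A hedged sketch using the embedded server's clientPort (2181) and the path the test overrides via getZookeeperConfigPath(); for brevity it omits the synchronous-connect latch shown earlier, so it could race a slow session setup:

    import org.apache.zookeeper.ZooKeeper;

    public class ReadBackConfig {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("localhost:2181", 5000, null);
            byte[] raw = zk.getData("/odf/testconfig", false, null);
            System.out.println(new String(raw, "UTF-8")); // the JSON-serialized ConfigContainer
            zk.close();
        }
    }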
+diff --git a/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+new file mode 100755
+index 0000000..2a5f331
+--- /dev/null
++++ b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
+@@ -0,0 +1,16 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++## USE for TESTs only
++
++DiscoveryServiceQueueManager=MockQueueManager
+diff --git a/odf/odf-test-env/.gitignore b/odf/odf-test-env/.gitignore
+new file mode 100755
+index 0000000..2045ff3
+--- /dev/null
++++ b/odf/odf-test-env/.gitignore
+@@ -0,0 +1,5 @@
++target
++.settings
++.classpath
++.project
++.DS_Store
+diff --git a/odf/odf-test-env/pom.xml b/odf/odf-test-env/pom.xml
+new file mode 100755
+index 0000000..a37ed22
+--- /dev/null
++++ b/odf/odf-test-env/pom.xml
+@@ -0,0 +1,142 @@
++<?xml version="1.0"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
++	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-test-env</artifactId>
++	<name>odf-test-env</name>
++	<url>http://maven.apache.org</url>
++	<properties>
++		<!-- specify versions of components to be downloaded -->
++		<jetty.version>9.2.10.v20150310</jetty.version>
++		<kafka.version>0.10.0.0</kafka.version>
++		<scala.version>2.11</scala.version>
++		<spark.version>2.1.0</spark.version>
++		<jetty.port>58081</jetty.port>
++	</properties>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-web</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>war</type>
++		</dependency>
++	</dependencies>
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-compiler-plugin</artifactId>
++				<executions>
++					<execution>
++						<id>default-compile</id>
++						<phase>compile</phase>
++						<goals>
++							<goal>compile</goal>
++						</goals>
++						<configuration>
++							<skipMain>true</skipMain>
++							<!-- do not compile anything -->
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<skipTests>true</skipTests>
++					<!-- do not run tests -->
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-antrun-plugin</artifactId>
++				<version>1.8</version>
++				<executions>
++					<execution>
++						<id>prepare-atlas</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target>
++								<property name="atlas-unpack-dir" value="${project.build.directory}/downloads" />
++								<property name="atlas.version" value="${atlas.version}" />
++								<ant antfile="../odf-atlas/build_atlas.xml" target="prepare-atlas"></ant>
++							</target>
++						</configuration>
++					</execution>
++					<execution>
++						<id>prepare-components</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target>
++								<property name="unpack-dir" value="${project.build.directory}/downloads" />
++								<property name="jetty.version" value="${jetty.version}" />
++								<property name="jetty.port" value="${jetty.port}" />
++								<property name="kafka.version" value="${kafka.version}" />
++								<property name="scala.version" value="${scala.version}" />
++								<property name="project.basedir" value="${project.basedir}"/>
++								<ant antfile="prepare_components.xml" target="default"></ant>
++							</target>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-assembly-plugin</artifactId>
++				<configuration>
++					<descriptor>src/assembly/bin.xml</descriptor>
++					<finalName>odf-test-env-${project.version}</finalName>
++				</configuration>
++				<executions>
++					<execution>
++						<id>create-distribution</id>
++						<phase>package</phase>
++						<goals>
++							<goal>single</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<artifactId>maven-jar-plugin</artifactId>
++				<version>2.3.1</version>
++				<executions>
++					<execution>
++						<id>default-jar</id>
++						<!-- do not create default-jar -->
++						<phase>none</phase>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++</project>
+diff --git a/odf/odf-test-env/prepare_components.xml b/odf/odf-test-env/prepare_components.xml
+new file mode 100755
+index 0000000..a6a733b
+--- /dev/null
++++ b/odf/odf-test-env/prepare_components.xml
+@@ -0,0 +1,169 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project name="prepare_components">
++
++	<!-- Property is provided by pom.xml -->
++	<!-- <property name="jetty.version" value="" /> -->
++	<!-- <property name="kafka.version" value="" /> -->
++	<!-- <property name="scala.version" value="" /> -->
++
++	<dirname property="script.basedir" file="${ant.file.prepare_components}" />
++
++	<property name="jetty-dir" value="jetty-distribution-${jetty.version}" />
++	<property name="kafka-dir" value="kafka_${scala.version}-${kafka.version}" />
++	<property name="spark-dir" value="spark-${spark.version}-bin-hadoop2.7" />
++
++	<property name="jetty-archive" value="/tmp/${jetty-dir}.zip" />
++	<property name="kafka-archive" value="/tmp/${kafka-dir}.tar.gz" />
++	<property name="spark-archive" value="/tmp/${spark-dir}.tar.gz" />
++
++	<condition property="jetty-zip-not-found">
++		<not>
++			<available file="${jetty-archive}">
++			</available>
++		</not>
++	</condition>
++
++	<condition property="kafka-zip-not-found">
++		<not>
++			<available file="${kafka-archive}">
++			</available>
++		</not>
++	</condition>
++
++	<condition property="spark-zip-not-found">
++		<not>
++			<available file="${spark-archive}">
++			</available>
++		</not>
++	</condition>
++
++	<condition property="jetty-unpacked">
++	   <available file="${unpack-dir}/${jetty-dir}/bin/jetty.sh"/>
++    </condition>
++
++	<condition property="kafka-unpacked">
++	   <available file="${unpack-dir}/${kafka-dir}/bin/kafka-server-start.sh"/>
++    </condition>
++
++	<condition property="spark-unpacked">
++	   <available file="${unpack-dir}/${spark-dir}/sbin/start-master.sh"/>
++    </condition>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="download-jetty" if="jetty-zip-not-found">
++		<echo message="Downloading Jetty. Depending on your network this can take up to 20 (yes, twenty) minutes." />
++		<get verbose="true" src="https://repo1.maven.org/maven2/org/eclipse/jetty/jetty-distribution/${jetty.version}/jetty-distribution-${jetty.version}.zip" dest="${jetty-archive}" />
++		<echo message="Jetty downloaded" />
++	</target>
++
++	<target name="download-kafka" if="kafka-zip-not-found">
++		<echo message="Downloading Kafka. Depending on your network this can take up to 20 (yes, twenty) minutes." />
++		<get verbose="true" src="http://ftp-stud.hs-esslingen.de/pub/Mirrors/ftp.apache.org/dist/kafka/${kafka.version}/kafka_${scala.version}-${kafka.version}.tgz" dest="${kafka-archive}" />
++		<echo message="Kafka downloaded" />
++	</target>
++
++	<target name="download-spark" if="spark-zip-not-found">
++		<echo message="Downloading Spark. Depending on your network this can take up to 20 (yes, twenty) minutes." />
++		<get verbose="true" src="http://d3kbcqa49mib13.cloudfront.net/spark-${spark.version}-bin-hadoop2.7.tgz" dest="${spark-archive}" />
++		<echo message="Spark downloaded" />
++	</target>
++
++	<target name="unzip-jetty" unless="jetty-unpacked">
++		<antcall target="download-jetty"/>
++		<echo message="Installing Jetty test instance" />
++		<echo message="Deleting ${unpack-dir}/${jetty-dir}" />
++		<delete dir="${unpack-dir}/${jetty-dir}" />
++		<echo message="deleted" />
++		<unzip src="${jetty-archive}" dest="${unpack-dir}" />
++		<!-- Create Jetty base folder -->
++		<mkdir dir="${unpack-dir}/odfjettybase"/>
++		<!-- Generate Jetty base configuration files -->
++		<java dir="${unpack-dir}/odfjettybase" classname="org.eclipse.jetty.start.Main" fork="true">
++			<arg value="--add-to-startd=https,ssl,deploy,plus"/>
++			<classpath>
++				<pathelement location="${unpack-dir}/${jetty-dir}/start.jar"/>
++				<pathelement path="${unpack-dir}/${jetty-dir}"/>
++				<pathelement path="${java.class.path}"/>
++			</classpath>
++			<jvmarg value="-Djetty.home=${unpack-dir}/${jetty-dir}"/>
++			<jvmarg value="-Djetty.base=${unpack-dir}/odfjettybase"/>
++		</java>
++		<!-- Update Jetty port number -->
++		<replace file="${unpack-dir}/odfjettybase/start.d/https.ini" token="https.port=8443" value="https.port=${jetty.port}"/>
++	</target>
++
++	<target name="unzip-kafka" unless="kafka-unpacked">
++		<antcall target="download-kafka"/>
++		<echo message="Installing Kafka test instance" />
++		<echo message="Deleting ${unpack-dir}/${kafka-dir}" />
++		<delete dir="${unpack-dir}/${kafka-dir}" />
++		<echo message="deleted" />
++	    <untar src="${kafka-archive}" dest="${unpack-dir}" compression="gzip" />
++
++		<!-- remove -loggc command line argument in scripts because they don't exist in the IBM JVM -->
++		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/kafka-server-start.sh" token="-loggc" value=""/>
++		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/zookeeper-server-start.sh" token="-loggc" value=""/>
++	</target>
++
++	<target name="unzip-spark" unless="spark-unpacked">
++		<antcall target="download-spark"/>
++		<echo message="Installing Spark test instance" />
++		<echo message="Deleting ${unpack-dir}/${spark-dir}" />
++		<delete dir="${unpack-dir}/${spark-dir}" />
++		<echo message="deleted" />
++	    <untar src="${spark-archive}" dest="${unpack-dir}" compression="gzip" />
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="enable-jetty-basic-authentication">
++		<echo message="Enabling jetty basic authentication..." />
++		<echo message="Updating jetty.xml file..." />
++		<replace file="${unpack-dir}/${jetty-dir}/etc/jetty.xml">
++			<!-- See corresponding config in web.xml file of SDP webapp -->
++			<replacetoken><![CDATA[</Configure>]]></replacetoken>
++			<replacevalue>
++				<![CDATA[
++	<Call name="addBean">
++		<Arg>
++			<New class="org.eclipse.jetty.security.HashLoginService">
++				<Set name="name">ODF Realm</Set>
++				<Set name="config"><SystemProperty name="jetty.home" default="."/>/etc/realm.properties</Set>
++			</New>
++		</Arg>
++	</Call>
++</Configure>
++				]]>
++			</replacevalue>
++		</replace>
++		<echo message="Copying credentials file..." />
++		<copy file="${script.basedir}/../jettyconfig/realm.properties" tofile="${unpack-dir}/${jetty-dir}/etc/realm.properties" overwrite="true"/>
++		<echo message="Jetty basic authentication has been enabled." />
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="default">
++		<mkdir dir="${unpack-dir}"/>
++		<antcall target="unzip-jetty"/>
++		<antcall target="enable-jetty-basic-authentication"/>
++		<antcall target="unzip-kafka"/>
++		<antcall target="unzip-spark"/>
++	</target>
++
++</project>
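The targets above implement a download-once/unpack-once flow: probe for a marker file inside the unpacked directory, else probe for the downloaded archive, else download it. A rough Java rendition of that control flow for the Kafka case, as a sketch only; paths and the mirror URL are copied from the targets, and the untar step is left to Ant:

    import java.io.InputStream;
    import java.net.URL;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class DownloadOnce {
        public static void main(String[] args) throws Exception {
            Path archive = Paths.get("/tmp/kafka_2.11-0.10.0.0.tar.gz");
            Path marker = Paths.get("target/downloads/kafka_2.11-0.10.0.0/bin/kafka-server-start.sh");
            if (Files.exists(marker)) {
                return; // the "kafka-unpacked" condition: nothing to do
            }
            if (!Files.exists(archive)) { // the "kafka-zip-not-found" condition
                URL src = new URL("http://ftp-stud.hs-esslingen.de/pub/Mirrors/ftp.apache.org/dist/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz");
                try (InputStream in = src.openStream()) {
                    Files.copy(in, archive, StandardCopyOption.REPLACE_EXISTING);
                }
            }
            // unpacking is handled by Ant's <untar compression="gzip"> in unzip-kafka
        }
    }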
+diff --git a/odf/odf-test-env/src/assembly/bin.xml b/odf/odf-test-env/src/assembly/bin.xml
+new file mode 100755
+index 0000000..b5731a7
+--- /dev/null
++++ b/odf/odf-test-env/src/assembly/bin.xml
+@@ -0,0 +1,73 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<assembly
++	xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
++	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
++	<id>bin</id>
++	<formats>
++		<format>zip</format>
++	</formats>
++	<fileSets>
++		<fileSet>
++			<outputDirectory>/</outputDirectory>
++			<directory>target/downloads</directory>
++			<excludes>
++				<exclude>*.zip</exclude>
++				<exclude>*.gz</exclude>
++				<exclude>**/zookeeper.properties</exclude>
++				<exclude>**/server.properties</exclude>
++			</excludes>
++			<fileMode>0755</fileMode>
++		</fileSet>
++		<fileSet>
++			<outputDirectory>/</outputDirectory>
++			<directory>src/main/scripts</directory>
++			<fileMode>0755</fileMode>
++			<excludes>
++			   <exclude>**/jenkins-*.sh</exclude>
++			</excludes>
++		</fileSet>
++		<fileSet>
++			<outputDirectory>/kafka_${scala.version}-${kafka.version}/config</outputDirectory>
++			<directory>src/main/config</directory>
++			<includes>
++				<include>*.properties</include>
++			</includes>
++		</fileSet>
++		<fileSet>
++			<directory>../odf-doc/target/site</directory>
++			<outputDirectory>/odf-documentation</outputDirectory>
++		</fileSet>
++	</fileSets>
++	<files>
++		<file>
++			<source>../odf-doc/src/site/markdown/test-env.md</source>
++			<outputDirectory>/</outputDirectory>
++			<destName>README.md</destName>
++		</file>
++	</files>
++	<dependencySets>
++		<dependencySet>
++			<outputDirectory>/odfjettybase/webapps</outputDirectory>
++			<includes>
++				<include>*:war:*</include>
++			</includes>
++			<excludes>
++				<exclude>*:jar:*</exclude>
++			</excludes>
++		</dependencySet>
++	</dependencySets>
++</assembly>
+diff --git a/odf/odf-test-env/src/main/config/server.properties b/odf/odf-test-env/src/main/config/server.properties
+new file mode 100755
+index 0000000..1f2a406
+--- /dev/null
++++ b/odf/odf-test-env/src/main/config/server.properties
+@@ -0,0 +1,120 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# see kafka.server.KafkaConfig for additional details and defaults
++
++############################# Server Basics #############################
++
++# The id of the broker. This must be set to a unique integer for each broker.
++broker.id=0
++
++############################# Socket Server Settings #############################
++
++# The port the socket server listens on
++port=59092
++
++# Hostname the broker will bind to. If not set, the server will bind to all interfaces
++#host.name=localhost
++
++# Hostname the broker will advertise to producers and consumers. If not set, it uses the
++# value for "host.name" if configured.  Otherwise, it will use the value returned from
++# java.net.InetAddress.getCanonicalHostName().
++#advertised.host.name=<hostname routable by clients>
++
++# The port to publish to ZooKeeper for clients to use. If this is not set,
++# it will publish the same port that the broker binds to.
++#advertised.port=<port accessible by clients>
++
++# The number of threads handling network requests
++num.network.threads=3
++ 
++# The number of threads doing disk I/O
++num.io.threads=8
++
++# The send buffer (SO_SNDBUF) used by the socket server
++socket.send.buffer.bytes=102400
++
++# The receive buffer (SO_RCVBUF) used by the socket server
++socket.receive.buffer.bytes=102400
++
++# The maximum size of a request that the socket server will accept (protection against OOM)
++socket.request.max.bytes=104857600
++
++
++############################# Log Basics #############################
++
++# A comma-separated list of directories under which to store log files
++log.dirs=/tmp/odftestenv-kafka-logs
++
++# The default number of log partitions per topic. More partitions allow greater
++# parallelism for consumption, but this will also result in more files across
++# the brokers.
++num.partitions=1
++
++# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
++# Increasing this value is recommended for installations with data dirs located in a RAID array.
++num.recovery.threads.per.data.dir=1
++
++############################# Log Flush Policy #############################
++
++# Messages are immediately written to the filesystem but by default we only fsync() to sync
++# the OS cache lazily. The following configurations control the flush of data to disk. 
++# There are a few important trade-offs here:
++#    1. Durability: Unflushed data may be lost if you are not using replication.
++#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
++#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
++# The settings below allow one to configure the flush policy to flush data after a period of time or
++# every N messages (or both). This can be done globally and overridden on a per-topic basis.
++
++# The number of messages to accept before forcing a flush of data to disk
++#log.flush.interval.messages=10000
++
++# The maximum amount of time a message can sit in a log before we force a flush
++#log.flush.interval.ms=1000
++
++############################# Log Retention Policy #############################
++
++# The following configurations control the disposal of log segments. The policy can
++# be set to delete segments after a period of time, or after a given size has accumulated.
++# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
++# from the end of the log.
++
++# The minimum age of a log file to be eligible for deletion
++log.retention.hours=168
++
++# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
++# segments don't drop below log.retention.bytes.
++#log.retention.bytes=1073741824
++
++# The maximum size of a log segment file. When this size is reached a new log segment will be created.
++log.segment.bytes=1073741824
++
++# The interval at which log segments are checked to see if they can be deleted according 
++# to the retention policies
++log.retention.check.interval.ms=300000
++
++# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
++# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
++log.cleaner.enable=false
++
++############################# Zookeeper #############################
++
++# Zookeeper connection string (see zookeeper docs for details).
++# This is a comma-separated list of host:port pairs, each corresponding to a zk
++# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
++# You can also append an optional chroot string to the urls to specify the
++# root directory for all kafka znodes.
++zookeeper.connect=localhost:52181
++
++# Timeout in ms for connecting to zookeeper
++zookeeper.connection.timeout.ms=6000
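A broker started with this server.properties listens on localhost:59092, so a smoke test only needs that address as bootstrap.servers. A hedged sketch against the kafka-clients producer API matching the 0.10.0.0 broker above; the topic name is made up for the example:

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class TestEnvSmokeTest {
        public static void main(String[] args) {
            Properties p = new Properties();
            p.put("bootstrap.servers", "localhost:59092"); // port from server.properties above
            p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
                producer.send(new ProducerRecord<>("odf-smoke-test", "hello from the ODF test env"));
            }
        }
    }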
+diff --git a/odf/odf-test-env/src/main/config/zookeeper.properties b/odf/odf-test-env/src/main/config/zookeeper.properties
+new file mode 100755
+index 0000000..5f4d7e0
+--- /dev/null
++++ b/odf/odf-test-env/src/main/config/zookeeper.properties
+@@ -0,0 +1,19 @@
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# the directory where the snapshot is stored.
++dataDir=/tmp/odftestenv-zookeeper
++# the port at which the clients will connect
++clientPort=52181
++# disable the per-ip limit on the number of connections since this is a non-production config
++maxClientCnxns=0
+diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.bat b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
+new file mode 100755
+index 0000000..84c2449
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
+@@ -0,0 +1,22 @@
++REM
++REM Licensed under the Apache License, Version 2.0 (the "License");
++REM you may not use this file except in compliance with the License.
++REM You may obtain a copy of the License at
++REM
++REM   http://www.apache.org/licenses/LICENSE-2.0
++REM
++REM Unless required by applicable law or agreed to in writing, software
++REM distributed under the License is distributed on an "AS IS" BASIS,
++REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++REM See the License for the specific language governing permissions and
++REM limitations under the License.
++
++setlocal
++
++REM you should not have to change anything below this line ;-)
++
++set TESTENVDIR=%~dp0
++set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
++
++echo Delete atlas data
++del /F /S /Q "%ATLAS_HOME%\data"
+diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.sh b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
+new file mode 100755
+index 0000000..4eb3b1d
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
+@@ -0,0 +1,22 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++# You should not have to change anything below this line ;-)
++export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
++
++export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
++
++echo Delete atlas data
++rm -rf $ATLAS_HOME/data
+diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
+new file mode 100755
+index 0000000..92561ad
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
+@@ -0,0 +1,24 @@
++REM 
++REM Licensed under the Apache License, Version 2.0 (the "License");
++REM you may not use this file except in compliance with the License.
++REM You may obtain a copy of the License at
++REM
++REM   http://www.apache.org/licenses/LICENSE-2.0
++REM
++REM Unless required by applicable law or agreed to in writing, software
++REM distributed under the License is distributed on an "AS IS" BASIS,
++REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++REM See the License for the specific language governing permissions and
++REM limitations under the License.
++
++setlocal
++
++REM set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
++set ODF_GIT_DIR=c:\git\open-discovery-framework
++
++
++REM you should not have to change anything below this line ;-)
++
++set TESTENVDIR=%~dp0
++
++copy /Y %ODF_GIT_DIR%\odf-web\target\odf-web-1.2.0-SNAPSHOT.war %TESTENVDIR%\odfjettybase\webapps
+diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
+new file mode 100755
+index 0000000..732515a
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
+@@ -0,0 +1,21 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++# Set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
++export ODF_GIT_DIR=~/git/open-discovery-framework
++
++# You should not have to change anything below this line ;-)
++export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
++cp $ODF_GIT_DIR/odf-web/target/odf-web-1.2.0-SNAPSHOT.war $BASEDIR/odfjettybase/webapps
+diff --git a/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
+new file mode 100755
+index 0000000..e3f6c52
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
+@@ -0,0 +1,73 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++#
++# Script to download, start, and configure the ODF test environment.
++# JenkinsBuildNumber refers to the build number of the job Open-Discovery-Framework, see here:
++# https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/Open-Discovery-Framework
++#
++# Usage: download-install-odf-testenv.sh [<JenkinsBuildNumber> <Directory> ]
++#        Default values:
++#             <JenkinsBuildNumber>: lastSuccessfulBuild
++#             <Directory>: ~/odf-test-env
++#
++
++JENKINSBUILDNUMBER=$1
++if [ -z "$JENKINSBUILDNUMBER" ]; then
++   JENKINSBUILDNUMBER=lastSuccessfulBuild
++   echo Jenkins build number not provided, using default $JENKINSBUILDNUMBER
++fi
++
++TESTENVDIR=$2
++if [ -z "$TESTENVDIR" ]; then
++   TESTENVDIR=~/odf-test-env
++   echo Target directory not provided, using default $TESTENVDIR
++fi
++
++# hidden third parameter taking the jenkins job name
++JENKINSJOB=$3
++if [ -z "$JENKINSJOB" ]; then
++   JENKINSJOB=Open-Discovery-Framework
++   echo Jenkins job not provided, using default $JENKINSJOB
++fi
++
++echo Downloading test env to directory $TESTENVDIR, Jenkins build number: $JENKINSBUILDNUMBER
++
++
++TESTENVVERSION=1.2.0-SNAPSHOT
++TESTENVZIP=/tmp/odf-test-env.zip
++FULLHOSTNAME=`hostname -f`
++
++
++echo Downloading ODF test env
++curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$JENKINSJOB/$JENKINSBUILDNUMBER/artifact/odf-test-env/target/odf-test-env-$TESTENVVERSION-bin.zip --output $TESTENVZIP
++
++echo Stopping test env if it exists...
++$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh stop
++sleep 1
++echo Test env stopped
++
++echo Removing existing test env directory...
++rm -rf $TESTENVDIR/odf-test-env-$TESTENVVERSION
++echo Existing test env directory removed
++
++echo Unpacking $TESTENVZIP to $TESTENVDIR
++mkdir -p $TESTENVDIR
++unzip -q $TESTENVZIP -d $TESTENVDIR
++
++$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh cleanall
++
++echo ODF test env installed and started
++echo "Point your browser to https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT to check it out"
+diff --git a/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
+new file mode 100755
+index 0000000..bdb1428
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
+@@ -0,0 +1,69 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++# This is the script used in the job definition of our Jenkins job Manage-Install-ODF-Testenv
++# The original can be found in git: odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
++#
++# The Jenkins job should have the following parameters:
++#
++# 1. nodelabel: Label parameter. Default: odftestenv
++#
++# 2. action: Choice parameter with these choices: start, stop, cleanall, cleanconfig, cleanmetadata, install
++# Action description:
++# Available actions are:
++#<ul>
++#  <li>install: Remove the existing and install a new test environment build.
++#    Installs the most recent successful build by default. To change which build is used
++#    set the parameters <em>buildnumber</em> and <em>job</em> accordingly.</li>
++#  <li>start: (re)start the test environment</li>
++#  <li>stop:  stop the test environment</li>
++#  <li>cleanall: (re)starts with clean configuration and clean metadata</li>
++#  <li>cleanconfig   (re)starts with clean configuration</li>
++#  <li>cleanmetadata (re)starts with clean metadata</li>
++#</ul>
++#
++# 3. jenkinsjob: Choice parameter with choices: Shared-Discovery-Platform, Shared-Discovery-Platform-Parameters
++#
++# 4. buildnumber: String parameter with default: lastSuccessfulBuild
++#
++
++echo Managing ODF test environment with parameters: action = $action, buildnumber = $buildnumber, jenkinsjob = $jenkinsjob
++
++if [ "$action" = "install" ]; then
++  ODFTESTENVTARGETDIR=/home/atlasadmin/odf-test-env
++  OUTPUTFILE=/tmp/download-install-odf-testenv.sh
++
++  if [ "$buildnumber" = "" ]; then
++    buildnumber=lastSuccessfulBuild
++  fi
++
++  if [ "$jenkinsjob" = "" ]; then
++    jenkinsjob=Shared-Discovery-Platform
++  fi
++
++  echo Downloading build number $buildnumber
++  curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$jenkinsjob/$buildnumber/artifact/odf-test-env/src/main/scripts/download-install-odf-testenv.sh --output $OUTPUTFILE
++
++  echo Running installer script on directory $ODFTESTENVTARGETDIR with build number $buildnumber
++  chmod 755 $OUTPUTFILE
++  export BUILD_ID=dontletjenkinskillme
++  echo Running command $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
++  $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
++else
++  TESTENVDIR=~/odf-test-env/odf-test-env-1.2.0-SNAPSHOT
++  export BUILD_ID=dontletjenkinskillme
++
++  $TESTENVDIR/odftestenv.sh $action
++fi
+diff --git a/odf/odf-test-env/src/main/scripts/odftestenv.sh b/odf/odf-test-env/src/main/scripts/odftestenv.sh
+new file mode 100755
+index 0000000..94d08f3
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/odftestenv.sh
+@@ -0,0 +1,232 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++
++# You should not have to change anything below this line ;-)
++###############################################################
++
++#############################################
++## Check that java and python are available
++
++
++if [ "x$JAVA_HOME" == "x" ]; then
++  echo "JAVA_HOME is not set, using standard java on path"
++  JAVAEXE=$(which java)
++else
++  echo "JAVA_HOME is set to $JAVA_HOME"
++  JAVAEXE=$JAVA_HOME/bin/java
++fi
++
++if [ ! -x $JAVAEXE ]; then
++   echo "Java executable $JAVAEXE could not be found. Set JAVA_HOME accordingly or make sure that java is in your path".
++   exit 1
++fi
++
++echo "Using java: $JAVAEXE"
++
++
++PYTHON27EXE=python
++PYTHONVERSION=`$PYTHON27EXE --version 2>&1`
++if [[ ! $PYTHONVERSION == *2.7.* ]]; then
++   echo "Warning: Python command is not version 2.7. Starting / stopping Atlas might not work properly"
++fi
++
++
++###############################################
++## Set some variables
++
++BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
++FULLHOSTNAME=`hostname -f`
++
++ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
++ATLAS_PORT=21453
++ATLAS_URL=https://localhost:$ATLAS_PORT
++ATLAS_USER=admin
++ATLAS_PASSWORD=UR0+HOiApXG9B8SNpKN5ww==
++
++ZK_DATADIR=/tmp/odftestenv-zookeeper
++KAFKA_DATADIR=/tmp/odftestenv-kafka-logs
++
++# export KAFKA_OPTS so that it is picked up by the kafka and zookeeper start scripts. This can be used as a marker to search for those processes
++KILLMARKER=thisisanodftestenvprocess
++export KAFKA_OPTS="-D$KILLMARKER=true"
++KAFKA_HOME=$BASEDIR/kafka_2.11-0.10.0.0
++SPARK_HOME=$BASEDIR/spark-2.1.0-bin-hadoop2.7
++
++JETTY_BASE=$BASEDIR/odfjettybase
++JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
++
++##########################################
++## Copy required files
++
++if [ "$(uname)" == "Darwin" ]; then
++	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
++else
++	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
++fi
++
++##########################################
++## Functions
++
++function waitSeconds {
++   echo "     Waiting for $1 seconds..."
++   sleep $1
++}
++
++function cleanMetadata {
++	echo Removing Atlas data...
++	rm -rf $ATLAS_HOME/data
++	rm -rf $ATLAS_HOME/logs
++	echo Atlas data removed
++}
++
++function cleanConfig {
++	echo Removing Zookeeper and Kafka data...
++	rm -rf $KAFKA_DATADIR
++	rm -rf $ZK_DATADIR
++	echo Zookeeper and Kafka data removed.
++}
++
++function reconfigureODF {
++	echo Configuring ODF...
++    JSON='{ "sparkConfig": { "clusterMasterUrl": "'$SPARK_MASTER'" } }'
++    echo Updating config to $JSON
++    curl -H "Content-Type: application/json" -X PUT -d "$JSON" -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/settings
++    echo ODF configured.
++}
++
++function healthCheck {
++    echo Running ODF health check
++    curl -X GET -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/engine/health
++    echo Health check finished
++}
++
++function startTestEnv {
++   echo Starting ODF test env
++   if [ -d "$ZK_DATADIR" ]; then
++      echo zookeeper data exists
++   fi
++
++   echo "Starting Zookeeper"
++   nohup $KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties &> $BASEDIR/nohupzookeeper.out &
++   waitSeconds 5
++   echo "Starting Kafka"
++   nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &> $BASEDIR/nohupkafka.out &
++   waitSeconds 5
++   if [[ $(unzip -v $JETTY_BASE/webapps/odf-web-1.2.0-SNAPSHOT.war | grep odf-atlas-) ]]; then
++     echo "Starting Atlas"
++     nohup $PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port $ATLAS_PORT &> $BASEDIR/nohupatlas.out &
++     waitSeconds 30
++   else
++       echo "Do not start Atlas because ODF was built without it."
++   fi
++   echo "Starting Spark master"
++   cd $SPARK_HOME
++   nohup sbin/start-master.sh &> $BASEDIR/nohupspark.out &
++   waitSeconds 5
++   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
++   echo "Spark master URL: $SPARK_MASTER"
++   echo "Starting Spark slave"
++   nohup sbin/start-slave.sh $SPARK_MASTER &> $BASEDIR/nohupspark.out &
++   waitSeconds 5
++   echo "Starting ODF on Jetty"
++   cd $JETTY_BASE
++   nohup $JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=$ATLAS_URL -Datlas.user=$ATLAS_USER -Datlas.password=$ATLAS_PASSWORD -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP &> $BASEDIR/nohupjetty.out &
++   waitSeconds 10
++
++   healthCheck
++   reconfigureODF
++
++   echo "ODF test env started on https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT"
++}
++
++function stopTestEnv {
++   echo Stopping ODF test env ...
++   echo Stopping kafka and zookeeper...
++   PROCESSNUM=`ps aux | grep $KILLMARKER | grep -v grep | wc | awk '{print $1}'`
++   if [ $PROCESSNUM -gt 0 ]; then
++      echo Killing $PROCESSNUM Kafka / ZK processes
++      kill -9 $(ps aux | grep $KILLMARKER | grep -v grep | awk '{print $2}')
++   else
++      echo No Kafka / Zookeeper processes found
++   fi
++   waitSeconds 3
++   echo Kafka and Zookeeper stopped
++   echo Stopping Atlas...
++   $PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
++   waitSeconds 5
++   echo Atlas stopped
++   echo Stopping Spark...
++   cd $SPARK_HOME
++   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
++   sbin/stop-slave.sh $SPARK_MASTER
++   sbin/stop-master.sh
++   waitSeconds 5
++   echo Spark stopped
++   echo Stopping Jetty...
++   cd $JETTY_BASE
++   $JAVAEXE -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP --stop
++   waitSeconds 5
++   echo Jetty stopped
++   echo ODF test env stopped
++}
++
++
++function usageAndExit {
++  echo "Usage: $0 start|stop|cleanconfig|cleanmetadata|cleanall"
++  echo "Manage the ODF test environment"
++  echo "Options:"
++  echo "         start         (re)start"
++  echo "         stop          stop"
++  echo "         cleanall      (re)starts with clean configuration and clean metadata"
++  echo "         cleanconfig   (re)starts with clean configuration"
++  echo "         cleanmetadata (re)starts with clean metadata"
++  exit 1;
++}
++
++###############################################
++## main script
++
++if [ -z "$1" ]; then
++   usageAndExit
++elif [ "$1" = "start" ]; then
++   echo "(Re) starting test env..."
++   stopTestEnv
++   echo "-------------------------------------"
++   startTestEnv
++   echo "Test env restarted"
++elif [ "$1" = "stop" ]; then
++   stopTestEnv
++elif [ "$1" = "cleanconfig" ]; then
++   echo "(Re) starting test env with clean configuration..."
++   stopTestEnv
++   cleanConfig
++   startTestEnv
++   echo "(Re)started test env with clean configuration"
++elif [ "$1" = "cleanmetadata" ]; then
++   echo "(Re) starting test env with clean metadata..."
++   stopTestEnv
++   cleanMetadata
++   startTestEnv
++   echo "(Re)started test env with clean metadata"
++elif [ "$1" = "cleanall" ]; then
++   echo "(Re) starting test env with clean configuration and metadata..."
++   stopTestEnv
++   cleanConfig
++   cleanMetadata
++   startTestEnv
++   echo "(Re)started test env with clean configuration and metadata"
++else
++   usageAndExit
++fi
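reconfigureODF() and healthCheck() above drive ODF's REST API with curl. For clients that cannot shell out, the same settings update could look like the following Java sketch; URL, credentials, and the JSON shape are taken from the script, the Spark master URL is a placeholder, and the trust setup for the test env's self-signed certificate (what curl -k bypasses) is deliberately omitted:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class OdfSettingsClient {
        public static void main(String[] args) throws Exception {
            URL url = new URL("https://localhost:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/settings");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("PUT");
            String auth = Base64.getEncoder()
                    .encodeToString("sdp:admin4sdp".getBytes(StandardCharsets.UTF_8));
            conn.setRequestProperty("Authorization", "Basic " + auth);
            conn.setRequestProperty("Content-Type", "application/json");
            conn.setDoOutput(true);
            String json = "{ \"sparkConfig\": { \"clusterMasterUrl\": \"spark://localhost:7077\" } }";
            try (OutputStream os = conn.getOutputStream()) {
                os.write(json.getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("HTTP " + conn.getResponseCode()); // expect 200 on success
        }
    }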
+diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
+new file mode 100755
+index 0000000..db442e0
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
+@@ -0,0 +1,57 @@
++REM
++REM Licensed under the Apache License, Version 2.0 (the "License");
++REM you may not use this file except in compliance with the License.
++REM You may obtain a copy of the License at
++REM
++REM   http://www.apache.org/licenses/LICENSE-2.0
++REM
++REM Unless required by applicable law or agreed to in writing, software
++REM distributed under the License is distributed on an "AS IS" BASIS,
++REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++REM See the License for the specific language governing permissions and
++REM limitations under the License.
++
++setlocal
++
++set JAVAEXE=%JAVA_HOME%\bin\java.exe
++set PYTHON27EXE=python
++
++
++REM you should not have to change anything below this line ;-)
++
++set TESTENVDIR=%~dp0
++set JETTY_HOME=%TESTENVDIR%jetty-distribution-9.2.10.v20150310
++set KAFKA_PACKAGE_DIR=%TESTENVDIR%kafka_2.11-0.10.0.0
++set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
++
++echo Delete logs
++del /F /S /Q "C:\tmp\odftestenv-kafka-logs"
++del /F /S /Q "C:\tmp\odftestenv-zookeeper"
++
++echo Copy required files
++xcopy %ATLAS_HOME%\conf\atlas-application.properties_windows %ATLAS_HOME%\conf\atlas-application.properties /Y
++
++REM Workaround for issue #94 (Location of keystore files is hardcoded in Atlas config)
++if not exist "C:\tmp\apache-atlas-0.7-incubating-release\conf" (mkdir "C:\tmp\apache-atlas-0.7-incubating-release\conf")
++xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jceks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y
++xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y	
++
++echo Start zookeeper:
++start "Zookeeper" %KAFKA_PACKAGE_DIR%\bin\windows\zookeeper-server-start.bat %KAFKA_PACKAGE_DIR%\config\zookeeper.properties
++
++timeout 5 /NOBREAK
++
++echo Start kafka:
++start "Kafka" %KAFKA_PACKAGE_DIR%\bin\windows\kafka-server-start.bat %KAFKA_PACKAGE_DIR%\config\server.properties
++
++timeout 5 /NOBREAK
++
++echo Stop and restart Atlas
++start "Stop Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_stop.py
++start "Start Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_start.py -port 21443
++
++echo Start jetty
++set JETTY_BASE=%TESTENVDIR%odfjettybase
++rem set JETTY_BASE=%TESTENVDIR%base2
++cd %JETTY_BASE%
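++REM The -D options pass the Zookeeper connection, Atlas URL and credentials, and the ODF log settings to the web app running in Jetty.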
++start "Jetty" %JAVAEXE% -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=https://localhost:21443 -Datlas.user=admin -Datlas.password=UR0+HOiApXG9B8SNpKN5ww== -Dodf.logspec=ALL,/tmp/odf-test-env-trace.log -jar %JETTY_HOME%\start.jar
+diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
+new file mode 100755
+index 0000000..664b5a9
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
+@@ -0,0 +1,53 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++export JAVAEXE=java
++export PYTHON27EXE=python
++
++# You should not have to change anything below this line ;-)
++export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
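++# BASEDIR is the absolute directory containing this script (pwd -P resolves symlinks), so the Jetty/Kafka/Atlas paths below are stable.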
++
++export JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
++export KAFKA_PACKAGE_DIR=$BASEDIR/kafka_2.11-0.10.0.0
++export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
++
++echo Delete logs
++rm -rf /tmp/odftestenv-kafka-logs
++rm -rf /tmp/odftestenv-zookeeper
++
++echo Copy required files
++if [ "$(uname)" == "Darwin" ]; then
++	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
++else
++	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
++fi
++
++echo Start zookeeper:
++$KAFKA_PACKAGE_DIR/bin/zookeeper-server-start.sh $KAFKA_PACKAGE_DIR/config/zookeeper.properties &
++
++sleep 5
++
++echo Start kafka:
++$KAFKA_PACKAGE_DIR/bin/kafka-server-start.sh $KAFKA_PACKAGE_DIR/config/server.properties &
++
++sleep 5
++
++echo Stop and restart Atlas
++$PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
++$PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port 21443
++
++echo Start jetty
++export JETTY_BASE=$BASEDIR/odfjettybase
++cd $JETTY_BASE
++$JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar &
+diff --git a/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
+new file mode 100755
+index 0000000..6f974b9
+--- /dev/null
++++ b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
+@@ -0,0 +1,16 @@
++#!/bin/bash
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#   http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++echo Stopping all processes of the odf-test-env...
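++# Select the PIDs (column 2 of ps aux) of all processes whose command line mentions 'odf-test-env', excluding the installer and this script itself, then force-kill them.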
++kill -9 $(ps aux | grep 'odf-test-env' | grep -v 'download-install' | grep -v 'stop-odf-testenv' | awk '{print $2}')
+diff --git a/odf/odf-web/.gitignore b/odf/odf-web/.gitignore
+new file mode 100755
+index 0000000..c290e84
+--- /dev/null
++++ b/odf/odf-web/.gitignore
+@@ -0,0 +1,11 @@
++.settings
++target
++.classpath
++.project
++.factorypath
++.externalToolBuilders
++build
++build/**
++node_modules
++node_modules/**
++.DS_Store
+diff --git a/odf/odf-web/download_swagger-ui.xml b/odf/odf-web/download_swagger-ui.xml
+new file mode 100755
+index 0000000..74ef82d
+--- /dev/null
++++ b/odf/odf-web/download_swagger-ui.xml
+@@ -0,0 +1,63 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project name="odf-download-swagger-ui">
++
++	<property name="swagger-dir" value="swagger-ui-${swagger.version}" />
++	<!-- download swagger ui directly from the web:
++	<property name="swagger-download" value="https://github.com/swagger-api/swagger-ui/archive/v${swagger.version}.tar.gz" />
++	<property name="swagger-archive" value="${unpack-dir}/${swagger-dir}.tar.gz" />
++	-->
++	<!-- download swagger ui from box: -->
++	<property name="swagger-download" value="https://ibm.box.com/shared/static/13cb0nobufykaxvrnezjf2fbtf0hpfn7.gz" />
++	<property name="swagger-archive" value="${unpack-dir}/swagger-ui-2.1.4.tar.gz" />
++
++	<condition property="swagger-zip-not-found">
++		<not>
++			<available file="${swagger-archive}">
++			</available>
++		</not>
++	</condition>
++
++	<condition property="swagger-unpacked">
++	   <available file="${unpack-dir}/${swagger-dir}/dist" type="dir" />
++    </condition>
++
++	<!-- ****************************************************************************************** -->
++
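++	<!-- download-swagger-ui runs only while the archive is missing (swagger-zip-not-found);
++	     unzip-swagger is skipped once the dist directory has been unpacked (swagger-unpacked). -->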
++	<target name="download-swagger-ui" if="swagger-zip-not-found">
++		<echo message="Downloading Swagger..." />
++		<get verbose="true" src="${swagger-download}" dest="${swagger-archive}" />
++		<echo message="Swagger downloaded" />
++	</target>
++
++	<target name="unzip-swagger" unless="swagger-unpacked">
++		<antcall target="download-swagger-ui"/>
++		<echo message="Installing Swagger" />
++		<echo message="Deleting ${unpack-dir}/${swagger-dir}" />
++		<delete dir="${unpack-dir}/${swagger-dir}" />
++		<echo message="Deleted" />
++	    <untar src="${swagger-archive}" dest="${unpack-dir}" compression="gzip" />
++	    <!-- <unzip src="${swagger-archive}" dest="${unpack-dir}" /> -->
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="default">
++		<mkdir dir="${unpack-dir}"/>
++		<antcall target="unzip-swagger"/>
++	</target>
++
++</project>
+diff --git a/odf/odf-web/package.json b/odf/odf-web/package.json
+new file mode 100755
+index 0000000..1fe599b
+--- /dev/null
++++ b/odf/odf-web/package.json
+@@ -0,0 +1,30 @@
++{
++  "name": "odf-web",
++  "version": "1.2.0-SNAPSHOT",
++  "main": "index.html",
++  "dependencies": {
++    "bootstrap": "^3.3.6",
++    "d3": "^3.5.12",
++    "react": "^0.14.6",
++    "jquery": "^2.2.0",
++    "react-addons-linked-state-mixin": "^0.14.6",
++    "react-bootstrap": "^0.28.2",
++    "react-dom": "^0.14.6",
++    "react-d3-components": "^0.6.1",
++    "bootstrap-material-design" : "^0.5.7",
++    "roboto-font": "^0.1.0"
++  },
++  "devDependencies": {    
++  	"webpack": "^1.12.11",
++  	"imports-loader": "^0.6.5",
++    "babel-core": "^6.4.0",
++    "babel-preset-es2015": "^6.3.13",
++    "babel-loader": "^6.2.1",
++    "babel-preset-react": "^6.3.13",
++    "url-loader": "^0.5.7",
++    "css-loader": "^0.23.1",
++    "style-loader": "^0.13.0"
++  },
++  "author": "IBM",
++  "license": "ISC"
++}
+diff --git a/odf/odf-web/pom.xml b/odf/odf-web/pom.xml
+new file mode 100755
+index 0000000..df0b702
+--- /dev/null
++++ b/odf/odf-web/pom.xml
+@@ -0,0 +1,441 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++	<parent>
++		<groupId>org.apache.atlas.odf</groupId>
++		<artifactId>odf</artifactId>
++		<version>1.2.0-SNAPSHOT</version>
++	</parent>
++	<artifactId>odf-web</artifactId>
++	<packaging>war</packaging>
++	<properties>
++		<!-- specify versions of components to be downloaded -->
++		<swagger.version>2.1.4</swagger.version>
++		<swagger.base.path>/${project.artifactId}-${project.version}/odf/api/v1</swagger.base.path>
++	</properties>
++
++	<dependencies>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-api</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.glassfish.jersey.core</groupId>
++			<artifactId>jersey-server</artifactId>
++			<version>2.22.2</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>javax.ws.rs</groupId>
++			<artifactId>jsr311-api</artifactId>
++			<version>1.1.1</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>javax.servlet</groupId>
++			<artifactId>servlet-api</artifactId>
++			<version>2.5</version>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<artifactId>swagger-jaxrs</artifactId>
++			<version>1.5.9</version>
++			<groupId>io.swagger</groupId>
++			<scope>compile</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-doc</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>war</type>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-spark</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++			<exclusions>
++				<!-- Exclude this dependency to avoid the following error when running the jetty-maven-plugin:
++				 "A required class was missing while executing org.eclipse.jetty:jetty-maven-plugin:9.2.14.v20151106:start: com/sun/jersey/spi/inject/InjectableProvider" -->
++				<exclusion>
++					<groupId>org.apache.hadoop</groupId>
++					<artifactId>hadoop-hdfs</artifactId>
++				</exclusion>
++			</exclusions>
++		</dependency>
++		<!-- Required for compatibility with Spark cluster (must use same version) -->
++		<dependency>
++			<groupId>org.apache.commons</groupId>
++			<artifactId>commons-lang3</artifactId>
++			<version>3.5</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>junit</groupId>
++			<artifactId>junit</artifactId>
++			<version>4.12</version>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-core</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-messaging</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<type>test-jar</type>
++			<scope>test</scope>
++		</dependency>
++		<dependency>
++			<groupId>org.apache.atlas.odf</groupId>
++			<artifactId>odf-store</artifactId>
++			<version>1.2.0-SNAPSHOT</version>
++			<scope>runtime</scope>
++		</dependency>
++	</dependencies>
++
++	<repositories>
++		<repository>
++			<id>iis-central</id>
++			<name>Archiva Managed Maven Repository</name>
++			<url>http://iis-repo.swg.usma.ibm.com:8080/archiva/repository/all/</url>
++		</repository>
++	</repositories>
++
++	<profiles>
++		<profile>
++			<id>atlas</id>
++			<dependencies>
++				<dependency>
++					<groupId>org.apache.atlas.odf</groupId>
++					<artifactId>odf-atlas</artifactId>
++					<version>1.2.0-SNAPSHOT</version>
++					<scope>runtime</scope>
++				</dependency>
++			</dependencies>
++		</profile>
++		<profile>
++			<id>jenkinsbuild</id>
++			<properties>
++				<cf.password>${env.CFPASSWORD}</cf.password> <!-- Take cf.password from environment variable when running in Jenkins so that the password doesn't appear in the log -->
++			</properties>
++		</profile>
++		<profile>
++			<id>integration-tests</id>
++			<activation>
++				<property>
++					<name>reduced-tests</name>
++					<value>!true</value>
++				</property>
++			</activation>
++			<build>
++				<plugins>
++					<plugin>
++						<groupId>org.apache.maven.plugins</groupId>
++						<artifactId>maven-failsafe-plugin</artifactId>
++						<version>2.19</version>
++						<configuration>
++							<systemPropertyVariables>
++								<!-- we always use the embedded Kafka in our integration tests -->
++								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
++								<odf.test.base.url>${odf.test.base.url}</odf.test.base.url>
++								<odf.test.webapp.url>${odf.test.webapp.url}</odf.test.webapp.url>
++								<odf.test.user>${odf.test.user}</odf.test.user>
++								<odf.test.password>${odf.test.password}</odf.test.password>
++								<odf.logspec>${odf.integrationtest.logspec}.client</odf.logspec>
++								<!-- The atlas configuration properties are only required when the "atlas" profile is activated -->
++								<atlas.url>${atlas.url}</atlas.url>
++								<atlas.user>${atlas.user}</atlas.user>
++								<atlas.password>${atlas.password}</atlas.password>
++							</systemPropertyVariables>
++							<includes>
++								<include>**/integrationtest/**</include>
++							</includes>
++						</configuration>
++						<executions>
++							<execution>
++								<id>integration-test</id>
++								<goals>
++									<goal>integration-test</goal>
++								</goals>
++							</execution>
++							<execution>
++								<id>verify</id>
++								<goals>
++									<goal>verify</goal>
++								</goals>
++							</execution>
++						</executions>
++					</plugin>
++				</plugins>
++			</build>
++		</profile>
++	</profiles>
++
++	<build>
++		<plugins>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-surefire-plugin</artifactId>
++				<version>2.19</version>
++				<configuration>
++					<systemPropertyVariables>
++						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
++						<odf.build.project.name>${project.name}</odf.build.project.name>
++					</systemPropertyVariables>
++					<excludes>
++						<exclude>**/integrationtest/**</exclude>
++					</excludes>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.eclipse.jetty</groupId>
++				<artifactId>jetty-maven-plugin</artifactId>
++				<version>9.2.14.v20151106</version>
++				<configuration>
++					<jettyXml>${project.parent.basedir}/jettyconfig/jetty.xml,${project.parent.basedir}/jettyconfig/jetty-ssl.xml,${project.parent.basedir}/jettyconfig/jetty-https.xml</jettyXml>
++					<scanIntervalSeconds>10</scanIntervalSeconds>
++					<stopPort>8005</stopPort>
++					<stopKey>STOP</stopKey>
++					<systemProperties>
++						<systemProperty>
++							<name>odf.zookeeper.connect</name>
++							<value>${testZookeepeConnectionString}</value>
++						</systemProperty>
++						<systemProperty>
++							<name>odf.logspec</name>
++							<value>${odf.integrationtest.logspec}.jettyserver</value>
++						</systemProperty>
++						<systemProperty>
++							<name>jetty.config.dir</name>
++							<value>${project.parent.basedir}/target/jettyconfig</value>
++						</systemProperty>
++						<systemProperty>
++							<name>atlas.url</name>
++							<value>${atlas.url}</value>
++						</systemProperty>
++						<systemProperty>
++							<name>atlas.user</name>
++							<value>${atlas.user}</value>
++						</systemProperty>
++						<systemProperty>
++							<name>atlas.password</name>
++							<value>${atlas.password}</value>
++						</systemProperty>
++					</systemProperties>
++				</configuration>
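++				<!-- Jetty is started as a daemon in pre-integration-test and stopped in
++				     post-integration-test so the failsafe integration tests can run against it. -->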
++				<executions>
++					<execution>
++						<id>start-jetty</id>
++						<phase>pre-integration-test</phase>
++						<goals>
++							<goal>start</goal>
++						</goals>
++						<configuration>
++							<scanIntervalSeconds>0</scanIntervalSeconds>
++							<daemon>true</daemon>
++						</configuration>
++					</execution>
++					<execution>
++						<id>stop-jetty</id>
++						<phase>post-integration-test</phase>
++						<goals>
++							<goal>stop</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>com.github.eirslett</groupId>
++				<artifactId>frontend-maven-plugin</artifactId>
++				<version>0.0.27</version>
++				<configuration>
++					<installDirectory>build</installDirectory>
++				</configuration>
++
++				<executions>
++					<execution>
++						<id>install node and npm</id>
++						<goals>
++							<goal>install-node-and-npm</goal>
++						</goals>
++						<configuration>
++							<nodeVersion>v0.12.2</nodeVersion>
++							<npmVersion>2.7.6</npmVersion>
++						</configuration>
++					</execution>
++					<execution>
++						<id>npm install</id>
++						<goals>
++							<goal>npm</goal>
++						</goals>
++						<configuration>
++							<arguments>install</arguments>
++						</configuration>
++					</execution>
++					<execution>
++						<id>webpack build</id>
++						<goals>
++							<goal>webpack</goal>
++						</goals>
++						<configuration>
++							<!-- change to -p for production mode -->
++							<arguments>-d</arguments>
++						</configuration>
++					</execution>
++					<!-- <execution> <id>npm-list-packages</id> <goals> <goal>npm</goal>
++						</goals> <phase>validate</phase> <configuration> <arguments>ls depth=0</arguments>
++						</configuration> </execution> -->
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-war-plugin</artifactId>
++				<version>2.4</version>
++				<configuration>
++					<failOnMissingWebXml>false</failOnMissingWebXml>
++					<packagingExcludes>**/scripts/**</packagingExcludes>
++					<overlays>
++						<overlay>
++							<!-- define here which files you want to take over from the odf-doc
++								war. -->
++							<groupId>org.apache.atlas.odf</groupId>
++							<artifactId>odf-doc</artifactId>
++							<excludes>
++								<exclude>WEB-INF/web.xml</exclude>
++							</excludes>
++							<includes>
++								<include>doc/**</include>
++							</includes>
++						</overlay>
++					</overlays>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-antrun-plugin</artifactId>
++				<version>1.8</version>
++				<executions>
++					<execution>
++						<inherited>false</inherited>
++						<id>prepare-embedded-jetty</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target>
++								<ant antfile="../prepare_embedded_jetty.xml" target="prepare-jetty-config" />
++							</target>
++						</configuration>
++					</execution>
++					<execution>
++						<id>prepare-components</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<target>
++								<property name="unpack-dir" value="${project.build.directory}/downloads" />
++								<property name="swagger.version" value="${swagger.version}" />
++								<ant antfile="download_swagger-ui.xml" target="default"></ant>
++							</target>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<artifactId>maven-resources-plugin</artifactId>
++				<version>2.6</version>
++				<executions>
++					<execution>
++						<id>copy-resources</id>
++						<phase>process-resources</phase>
++						<goals>
++							<goal>copy-resources</goal>
++						</goals>
++						<configuration>
++							<outputDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</outputDirectory>
++							<resources>
++								<resource>
++									<directory>${project.build.directory}/downloads/swagger-ui-${swagger.version}/dist</directory>
++									<filtering>false</filtering>
++									<excludes>
++										<exclude>index.html</exclude>
++									</excludes>
++								</resource>
++							</resources>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>com.github.kongchen</groupId>
++				<artifactId>swagger-maven-plugin</artifactId>
++				<version>3.1.1</version>
++				<configuration>
++					<apiSources>
++						<apiSource>
++							<springmvc>false</springmvc>
++							<locations>org.apache.atlas.odf.admin.rest.resources</locations>
++							<schemes>https</schemes>
++							<basePath>${swagger.base.path}</basePath>
++							<info>
++								<title>Open Discovery Framework</title>
++								<version>v1</version>
++								<description>
++									API reference
++								</description>
++							</info>
++							<swaggerDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</swaggerDirectory>
++							<swaggerApiReader>com.wordnik.swagger.jaxrs.reader.DefaultJaxrsApiReader</swaggerApiReader>
++						</apiSource>
++					</apiSources>
++				</configuration>
++				<executions>
++					<execution>
++						<phase>compile</phase>
++						<goals>
++							<goal>generate</goal>
++						</goals>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++</project>
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
+new file mode 100755
+index 0000000..89756cc
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
+@@ -0,0 +1,71 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.log;
++
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.logging.Handler;
++import java.util.logging.Level;
++import java.util.logging.LogRecord;
++import java.util.logging.SimpleFormatter;
++
++public class LoggingHandler extends Handler {
++
++	private static final int LOG_CACHE_SIZE = 1000;
++	private static List<LogRecord> cachedLogs = Collections.synchronizedList(new ArrayList<LogRecord>());
++
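++	// The cache keeps at most LOG_CACHE_SIZE records: publish() evicts the oldest
++	// entry once that limit is reached, so it behaves like a simple ring buffer.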
++	@Override
++	public void publish(LogRecord record) {
++		cachedLogs.add(record);
++		if (cachedLogs.size() >= LOG_CACHE_SIZE) {
++			cachedLogs.remove(0);
++		}
++	}
++
++	@Override
++	public void flush() {
++		cachedLogs.clear();
++	}
++
++	@Override
++	public void close() throws SecurityException {
++		cachedLogs.clear();
++	}
++
++	public List<LogRecord> getCachedLog() {
++		return new ArrayList<LogRecord>(cachedLogs);
++	}
++
++	public String getFormattedCachedLog(Integer numberOfLogs, Level logLevel) {
++		final List<LogRecord> cachedLog = getCachedLog();
++		StringBuilder lg = new StringBuilder();
++		final SimpleFormatter simpleFormatter = new SimpleFormatter();
++		if (numberOfLogs != null) {
++			// Format only the most recent numberOfLogs records, oldest of those first.
++			int start = Math.max(0, cachedLog.size() - numberOfLogs);
++			for (int i = start; i < cachedLog.size(); i++) {
++				final LogRecord record = cachedLog.get(i);
++				if (record.getLevel().intValue() >= logLevel.intValue()) {
++					lg.append(simpleFormatter.format(record));
++				}
++			}
++		} else {
++			for (LogRecord record : cachedLog) {
++				lg.append(simpleFormatter.format(record));
++			}
++		}
++		return lg.toString();
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
+new file mode 100755
+index 0000000..b51da36
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
+@@ -0,0 +1,50 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest;
++
++import java.util.HashSet;
++import java.util.Set;
++
++import javax.ws.rs.core.Application;
++
++import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;
++import org.apache.atlas.odf.admin.rest.resources.AnalysesResource;
++import org.apache.atlas.odf.admin.rest.resources.AnnotationsResource;
++import org.apache.atlas.odf.admin.rest.resources.DiscoveryServicesResource;
++import org.apache.atlas.odf.admin.rest.resources.EngineResource;
++import org.apache.atlas.odf.admin.rest.resources.ImportResource;
++import org.apache.atlas.odf.admin.rest.resources.MetadataResource;
++import org.apache.atlas.odf.admin.rest.resources.SettingsResource;
++
++public class ODFAdminApp extends Application {
++	@Override
++	public Set<Class<?>> getClasses() {
++		Set<Class<?>> classes = new HashSet<Class<?>>();
++		classes.add(AnalysesResource.class);
++		classes.add(SettingsResource.class);
++		classes.add(EngineResource.class);
++		classes.add(MetadataResource.class);
++		classes.add(AnnotationsResource.class);
++		classes.add(DiscoveryServicesResource.class);
++		classes.add(ImportResource.class);
++		return classes;
++	}
++
++	@Override
++	public Set<Object> getSingletons() {
++		Set<Object> set = new HashSet<Object>();
++		set.add(new JacksonJsonProvider());
++		return set;
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
+new file mode 100755
+index 0000000..ed9010d
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
+@@ -0,0 +1,48 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest;
++
++import java.io.PrintWriter;
++import java.io.StringWriter;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++public class RestUtils {
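++	// Both methods map a failure to a 400 Bad Request whose body is a JSON object
++	// of the form { "error": "<message or stack trace>" }.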
++	public static Response createErrorResponse(Throwable t) {
++		StringWriter sw = new StringWriter();
++		PrintWriter pw = new PrintWriter(sw);
++		t.printStackTrace(pw);
++		return createErrorResponse(sw.toString());
++	}
++
++	public static Response createErrorResponse(String msg) {
++		Logger logger = Logger.getLogger(RestUtils.class.getName());
++		logger.log(Level.WARNING, "An unknown exception was thrown: ''{0}''", msg);
++		String errorMsg = "{ \"error\": \"An unknown exception occurred\"}";
++		try {
++			JSONObject errorJSON = new JSONObject();
++			errorJSON.put("error", msg);
++			errorMsg = errorJSON.write();
++		} catch (JSONException e) {
++			// do nothing, should never happen
++		}
++		return Response.status(Status.BAD_REQUEST).entity(errorMsg).build();
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java
+new file mode 100755
+index 0000000..a3bc3b2
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java
+@@ -0,0 +1,156 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.util.logging.Logger;
++
++import javax.ws.rs.Consumes;
++import javax.ws.rs.DefaultValue;
++import javax.ws.rs.GET;
++import javax.ws.rs.POST;
++import javax.ws.rs.Path;
++import javax.ws.rs.PathParam;
++import javax.ws.rs.Produces;
++import javax.ws.rs.QueryParam;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
++import org.apache.atlas.odf.api.ODFFactory;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++@Path("/analyses")
++@Api(value = "/analyses", description = "Create and view analysis requests", produces = MediaType.APPLICATION_JSON)
++public class AnalysesResource {
++	private Logger logger = Logger.getLogger(AnalysesResource.class.getName());
++
++	@GET
++	@Path("/stats")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get analysis request statistics", httpMethod = "GET", notes = "Return number of successfull and failing analysis requests", response = AnalysisRequestSummary.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getStats() {
++		try {
++			return Response.ok(JSONUtils.toJSON(new ODFFactory().create().getAnalysisManager().getAnalysisStats())).build();
++		} catch (JSONException e) {
++			e.printStackTrace();
++			logger.info("Parse exception " + e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get list of analysis requests", httpMethod = "GET", notes = "Retrieve list of recent analysis requests (from latest to oldest)", responseContainer="List", response = AnalysisRequestTrackers.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getAnalysisRequests(
++			@ApiParam(value = "Starting offset (use 0 to start with the latest request).", required = false)
++			@DefaultValue("0") @QueryParam("offset") int offset,
++			@ApiParam(value = "Maximum number of analysis requests to be returned (use -1 to retrieve all requests).", required = false)
++			@DefaultValue("10") @QueryParam("limit") int limit) {
++		try {
++			String result = JSONUtils.toJSON(new ODFFactory().create().getAnalysisManager().getAnalysisRequests(offset, limit));
++			return Response.ok(result).build();
++		} catch (Exception exc) {
++			throw new RuntimeException(exc);
++		}
++	}
++
++	@GET
++	@Path("/{requestId}")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get analysis request status", httpMethod = "GET", notes = "Show status of a specific analysis request", response = AnalysisRequestStatus.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getAnalysisStatus(
++			@ApiParam(value = "ID of the analysis request", required = true)
++			@PathParam("requestId") String requestId) {
++		logger.entering(AnalysesResource.class.getName(), "getAnalysisStatus");
++		AnalysisRequestStatus analysisRequestStatus = new ODFFactory().create().getAnalysisManager().getAnalysisRequestStatus(requestId);
++		try {
++			return Response.ok(JSONUtils.toJSON(analysisRequestStatus)).build();
++		} catch (JSONException e) {
++			e.printStackTrace();
++			logger.info("Parse exception " + e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@POST
++	@Produces(MediaType.APPLICATION_JSON)
++	@Consumes(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Run analysis", httpMethod = "POST", notes = "Create and run new analysis request", response = AnalysisResponse.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response startAnalysis(@ApiParam(value = "Analysis request to be started", required = true) AnalysisRequest request) {
++		logger.entering(AnalysesResource.class.getName(), "startAnalysis");
++		try {
++			AnalysisResponse analysisResponse = new ODFFactory().create().getAnalysisManager().runAnalysis(request);
++			return Response.ok(JSONUtils.toJSON(analysisResponse)).build();
++		} catch (JSONException e) {
++			e.printStackTrace();
++			logger.info("Parse exception " + e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@POST
++	@Path("/{requestId}/cancel")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Cancel analysis request", httpMethod = "POST", notes = "Cancel a queued analysis request that has not been started yet", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request - The request with the provided id could not be found"),
++			@ApiResponse(code = 403, message = "Forbidden - The status of the analysis request does not allow for cancellation")
++	})
++	public Response cancelAnalysisRequest(@ApiParam(value = "ID of the analysis request", required = true) @PathParam("requestId") String requestId) {
++		logger.entering(AnalysesResource.class.getName(), "cancelAnalysisRequest");
++		AnalysisCancelResult result = new ODFFactory().create().getAnalysisManager().cancelAnalysisRequest(requestId);
++		if (result.getState() == AnalysisCancelResult.State.NOT_FOUND) {
++			return Response.status(Status.BAD_REQUEST).build();
++		} else if (result.getState() == AnalysisCancelResult.State.INVALID_STATE) {
++			return Response.status(Status.FORBIDDEN).build();
++		}
++		return Response.ok().build();
++	}
++
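++	// Illustrative usage, assuming the swagger base path odf/api/v1 configured in the pom:
++	//   POST <server>/odf/api/v1/analyses              starts a request (body: AnalysisRequest JSON)
++	//   GET  <server>/odf/api/v1/analyses/{requestId}  polls its status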
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java
+new file mode 100755
+index 0000000..704b004
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java
+@@ -0,0 +1,130 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import javax.ws.rs.GET;
++import javax.ws.rs.POST;
++import javax.ws.rs.Path;
++import javax.ws.rs.PathParam;
++import javax.ws.rs.Produces;
++import javax.ws.rs.QueryParam;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
++import org.apache.atlas.odf.api.annotation.Annotations;
++import org.apache.atlas.odf.json.JSONUtils;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++@Path("/annotations")
++@Api(value = "/annotations", description = "Create and query ODF annotations", produces = MediaType.APPLICATION_JSON)
++public class AnnotationsResource {
++
++	Logger logger = Logger.getLogger(AnnotationsResource.class.getName());
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Retrieve annotations", httpMethod = "GET", notes = "Retrieve annotations for an asset and/or for a specific analysis request.", response = Annotations.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response retrieveAnnotationsForAsset(@ApiParam(value = "Reference ID of the asset", required = false) @QueryParam("assetReference") String assetReference,
++			@ApiParam(value = "Analysis request ID", required = false) @QueryParam("analysisRequestId") String analysisRequestId) {
++		try {
++			MetaDataObjectReference ref = null;
++			if (assetReference != null) {
++				ref = new MetaDataObjectReference();
++				String repoId = new ODFFactory().create().getMetadataStore().getRepositoryId();
++				ref.setRepositoryId(repoId);
++				ref.setId(assetReference);
++			}
++			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++			List<Annotation> annots = as.getAnnotations(ref, analysisRequestId);
++			Annotations result = new Annotations();
++			result.setAnnotations(annots);
++			return Response.ok(JSONUtils.toJSON(result)).build();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while retrieving annotations", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
++
++	@GET
++	@Path("/objects/{objectReference}")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Retrieve annotation", httpMethod = "GET", notes = "Retrieve annotation by Id.", response = Annotation.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response retrieveAnnotation(@ApiParam(value = "Reference ID of the annotation", required = true) @PathParam("objectReference") String objectReference) {
++		try {
++			MetaDataObjectReference ref = new MetaDataObjectReference();
++			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++			ref.setRepositoryId(as.getRepositoryId());
++			ref.setId(objectReference);
++			Annotation annot = as.retrieveAnnotation(ref);
++			return Response.ok(JSONUtils.toJSON(annot)).build();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while retrieving annotation", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
++
++	// no swagger documentation as this will be replaced by "annotation propagation"
++	@GET
++	@Path("/newestAnnotations/{assetReference}")
++	@Produces(MediaType.APPLICATION_JSON)
++	public Response retrieveMostRecentAnnotations(@PathParam("assetReference") String assetReference) {
++		try {
++			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
++			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++			List<Annotation> annotations = AnnotationStoreUtils.getMostRecentAnnotationsByType(as, ref);
++			String result = JSONUtils.toJSON(annotations);
++			return Response.ok(result).build();
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "An exception occurred while retrieving most recent annotations", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@POST
++	@ApiOperation(value = "Create annotation", httpMethod = "POST", notes = "Create new annotation object", response = MetaDataObjectReference.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response createAnnotation(@ApiParam(value = "Analysis request to be started", required = true) String annotString) {
++		try {
++			Annotation annot = JSONUtils.fromJSON(annotString, Annotation.class);
++			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++			MetaDataObjectReference annotRef = as.store(annot);
++			return Response.status(Status.CREATED).entity(JSONUtils.toJSON(annotRef)).build();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while storing an annotation", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
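++	// Illustrative usage, assuming the same base path as the other resources:
++	//   GET <server>/odf/api/v1/annotations?analysisRequestId=<id>
++	// returns all annotations created by that analysis request.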
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java
+new file mode 100755
+index 0000000..bd01e60
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java
+@@ -0,0 +1,341 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.io.InputStream;
++import java.util.List;
++import java.util.logging.Logger;
++
++import javax.ws.rs.Consumes;
++import javax.ws.rs.DELETE;
++import javax.ws.rs.GET;
++import javax.ws.rs.POST;
++import javax.ws.rs.PUT;
++import javax.ws.rs.Path;
++import javax.ws.rs.PathParam;
++import javax.ws.rs.Produces;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
++import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
++import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
++import org.apache.atlas.odf.json.JSONUtils;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++@Path("/services")
++@Api(value = "/services", description = "Manage ODF services", produces = MediaType.APPLICATION_JSON)
++public class DiscoveryServicesResource {
++	private Logger logger = Logger.getLogger(DiscoveryServicesResource.class.getName());
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get list of discovery services", httpMethod = "GET", notes = "Retrieve list of all discovery services registered in ODF", responseContainer="List", response = DiscoveryServiceProperties.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getDiscoveryServices() {
++		logger.entering(DiscoveryServicesResource.class.getName(), "getServices");
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response;
++		List<DiscoveryServiceProperties> dsProperties = dsAdmin.getDiscoveryServicesProperties();
++		try {
++			String json = JSONUtils.toJSON(dsProperties);
++			response = Response.ok(json).build();
++		} catch (JSONException e) {
++			e.printStackTrace();
++			logger.info("Parse exception " + e);
++			response = RestUtils.createErrorResponse(e);
++		}
++		return response;
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@Path("/status")
++	@ApiOperation(value = "Get status of discovery services", httpMethod = "GET", notes = "Retrieve status overview of all discovery services registered in ODF", responseContainer="List", response = ServiceStatusCount.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 404, message = "Not found"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getAllServicesStatus() {
++		logger.entering(DiscoveryServicesResource.class.getName(), "getAllServicesStatus");
++		List<ServiceStatusCount> servicesStatus = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServiceStatusOverview();
++		if (servicesStatus == null) {
++			return Response.status(Status.NOT_FOUND).build();
++		}
++		String json;
++		try {
++			json = JSONUtils.toJSON(servicesStatus);
++		} catch (JSONException e) {
++			throw new RuntimeException(e);
++		}
++		return Response.ok(json).build();
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@Path("/{serviceId}/status")
++	@ApiOperation(value = "Get discovery service status", httpMethod = "GET", notes = "Retrieve status of a discovery service that is registered in ODF", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 404, message = "Not found"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getDiscoveryServiceStatus(
++			@ApiParam(value = "Discovery service ID", required = true)
++			@PathParam("serviceId") String serviceId) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceStatus");
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response;
++		try {
++			DiscoveryServiceStatus dsStatus = dsAdmin.getDiscoveryServiceStatus(serviceId);
++			if (dsStatus == null) {
++				response = Response.status(Status.NOT_FOUND).build();
++			}
++			else {
++				try {
++					String json = JSONUtils.toJSON(dsStatus);
++					response = Response.ok(json).build();
++				} catch (JSONException e) {
++					e.printStackTrace();
++					logger.info("Parse exception " + e);
++					response = RestUtils.createErrorResponse(e);
++				}
++			}
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		return response;
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@Path("/{serviceId}/runtimeStats")
++	@ApiOperation(value = "Get runtime statistics of a discovery service", httpMethod = "GET", notes = "Retrieve the runtime statistics of a discovery service that is registered in ODF.", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 404, message = "Not found"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getDiscoveryServiceRuntimeStats(
++			@ApiParam(value = "Discovery service ID", required = true)
++			@PathParam("serviceId") String serviceId) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceRuntimeStats");
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response;
++		try {
++			DiscoveryServiceRuntimeStatistics dsRuntimeStats = dsAdmin.getDiscoveryServiceRuntimeStatistics(serviceId);
++			String json = JSONUtils.toJSON(dsRuntimeStats);
++			response = Response.ok(json).build();
++		}
++		catch (JSONException e) {
++			e.printStackTrace();
++			logger.info("Parse exception " + e);
++			response = RestUtils.createErrorResponse(e);
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		return response;
++	}
++
++	@DELETE
++	@Path("/{serviceId}/runtimeStats")
++	@ApiOperation(value = "Delete runtime statistics of a discovery service", httpMethod = "DELETE", notes = "Delete the runtime statistics of a discovery service that is registered in ODF.", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 404, message = "Not found"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response deleteDiscoveryServiceRuntimeStats(
++			@ApiParam(value = "Discovery service ID", required = true)
++			@PathParam("serviceId") String serviceId) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "deleteDiscoveryServiceRuntimeStats");
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response;
++		try {
++			dsAdmin.deleteDiscoveryServiceRuntimeStatistics(serviceId);
++			response = Response.ok().build();
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		return response;
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@Path("/{serviceId}")
++	@ApiOperation(value = "Get properties of a discovery service registered in ODF", httpMethod = "GET", notes = "Retrieve properties of a discovery service that is registered in ODF", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 404, message = "Not found"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getDiscoveryServiceProperties(
++			@ApiParam(value = "Id string of discovery service", required = true)
++			@PathParam("serviceId") String serviceId) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceProperties");
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response;
++		try {
++			DiscoveryServiceProperties dsStatus = dsAdmin.getDiscoveryServiceProperties(serviceId);
++			if (dsStatus == null) {
++				response = Response.status(Status.NOT_FOUND).build();
++			}
++			else {
++				try {
++					String json = JSONUtils.toJSON(dsStatus);
++					response = Response.ok(json).build();
++				} catch (JSONException e) {
++					e.printStackTrace();
++					logger.info("Parse exception " + e);
++					response = RestUtils.createErrorResponse(e);
++				}
++			}
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		return response;
++	}
++
++	@POST
++	@Consumes(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Register a discovery service", httpMethod = "POST", notes = "Register a new service in ODF", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response registerDiscoveryService(
++			@ApiParam(value = "ODF service definition", required = true) DiscoveryServiceProperties dsProperties) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "registerDiscoveryService");
++		Response response;
++		try {
++			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++			dsAdmin.createDiscoveryService(dsProperties);
++			response = Response.ok().build();
++		} catch (ValidationException e) {
++			e.printStackTrace();
++			logger.info("Validation exception during setting of property " + e.getProperty());
++			response = RestUtils.createErrorResponse(e.getErrorCause());
++		}
++		return response;
++	}
++
++	@PUT
++	@Consumes(MediaType.APPLICATION_JSON)
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Update properties of a discovery service", httpMethod = "POST", notes = "Update properties of a discovery service that is registered in ODF", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response updateDiscoveryService(
++			@ApiParam(value = "ODF service definition", required = true) DiscoveryServiceProperties dsProperties) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "updateDiscoveryService");
++		Response response;
++		try {
++			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++			dsAdmin.replaceDiscoveryService(dsProperties);
++			response = Response.ok().build();
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		catch (ValidationException e) {
++			e.printStackTrace();
++			logger.info("Validation exception during setting of property " + e.getProperty());
++			response = RestUtils.createErrorResponse(e.getErrorCause());
++		}
++		return response;
++	}
++
++	@DELETE
++	@Path("/{serviceId}")
++	@ApiOperation(value = "Delete a discovery service", httpMethod = "DELETE", notes = "Remove a registered service from ODF", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response deleteDiscoveryService(
++			@ApiParam(value = "Id string of discovery service to be deleted", required = true)
++			@PathParam("serviceId") String serviceId) {
++		logger.entering(DiscoveryServicesResource.class.getName(), "deleteDiscoveryService");
++		Response response;
++		try {
++			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++			dsAdmin.deleteDiscoveryService(serviceId);
++			response = Response.ok().build();
++		}
++		catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		catch (ValidationException e) {
++			e.printStackTrace();
++			logger.info("Validation exception during deletion. Property: " + e.getProperty());
++			response = RestUtils.createErrorResponse(e.getErrorCause());
++		}
++		return response;
++	}
++
++	@GET
++	@Path("/{serviceId}/image")
++	@Produces("image/*")
++	@ApiOperation(value = "Get a discovery service logo", httpMethod = "GET", notes = "Retrieve image representing a discovery service", response = InputStream.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 404, message = "Not found"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getImage(
++			@ApiParam(value = "ID of discovery service", required = true)
++			@PathParam("serviceId") String serviceId) {
++
++		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
++		Response response = null;
++		InputStream is;
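++		// Note: the content type is hard-coded to image/png even though the
++		// endpoint is declared to produce image/*; a null stream maps to 404.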
++		try {
++			is = dsAdmin.getDiscoveryServiceImage(serviceId);
++			if (is == null) {
++				// should never happen
++				response = Response.status(Status.NOT_FOUND).build();
++			}
++			else {
++				response = Response.ok(is, "image/png").build();
++			}
++		} catch (ServiceNotFoundException snfe) {
++			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
++		}
++		return response;
++	}
++
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java
+new file mode 100755
+index 0000000..d6cd37d
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java
+@@ -0,0 +1,168 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.io.IOException;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import javax.ws.rs.Consumes;
++import javax.ws.rs.GET;
++import javax.ws.rs.POST;
++import javax.ws.rs.Path;
++import javax.ws.rs.Produces;
++import javax.ws.rs.QueryParam;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.atlas.odf.api.engine.SystemHealth;
++import org.apache.atlas.odf.api.utils.ODFLogConfig;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.admin.log.LoggingHandler;
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.engine.ODFEngineOptions;
++import org.apache.atlas.odf.api.engine.ODFStatus;
++import org.apache.atlas.odf.api.engine.ODFVersion;
++import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
++import org.apache.atlas.odf.json.JSONUtils;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++@Path("/engine")
++@Api(value = "/engine", description = "Monitor and control the ODF engine", produces = MediaType.APPLICATION_JSON)
++public class EngineResource {
++	final static LoggingHandler REST_LOG_HANDLER = new LoggingHandler();
++
++	static {
++		//initialize log config and log handler to cache logs
++		ODFLogConfig.run();
++		Logger rootLogger = Logger.getLogger("org.apache.atlas.odf");
++		REST_LOG_HANDLER.setLevel(Level.ALL);
++		rootLogger.addHandler(REST_LOG_HANDLER);
++	}
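++	// REST_LOG_HANDLER caches recent log records of the org.apache.atlas.odf
++	// loggers in memory; the /engine/log endpoint below serves them to clients.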
++
++	private Logger logger = Logger.getLogger(EngineResource.class.getName());
++
++	@POST
++	@Path("shutdown")
++	@Consumes(MediaType.APPLICATION_JSON)
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Shutdown ODF engine", httpMethod = "POST", notes = "Shutdown ODF engine, purge all scheduled analysis requests from the queues, and cancel all running analysis requests (for debugging purposes only)", response = Response.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response shutdown(@ApiParam(value = "Engine options", required = true) ODFEngineOptions engineOptions) {
++		logger.entering(EngineResource.class.getName(), "shutdown");
++		logger.log(Level.INFO, "Restart option is {0}", engineOptions.isRestart());
++		new ODFFactory().create().getEngineManager().shutdown(engineOptions);
++		return Response.ok().build();
++	}
++
++	@GET
++	@Path("health")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get health status", httpMethod = "GET", notes = "Check the health status of ODF", response = SystemHealth.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response healthCheck() {
++		logger.entering(EngineResource.class.getName(), "healthCheck");
++		SystemHealth health = new ODFFactory().create().getEngineManager().checkHealthStatus();
++		Status status = Status.OK;
++		try {
++			return Response.status(status).entity(JSONUtils.toJSON(health)).type(MediaType.APPLICATION_JSON).build();
++		} catch (JSONException e) {
++			logger.log(Level.WARNING, "Unable to serialize health status", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("runtimes")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get info about the available runtimes", httpMethod = "GET", notes = "Get information about all runtimes running discovery services", response = ServiceRuntimesInfo.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getRuntimesInfo() {
++		logger.entering(EngineResource.class.getName(), "getRuntimesInfo");
++		ServiceRuntimesInfo sri = new ODFFactory().create().getEngineManager().getRuntimesInfo();
++		Status status = Status.OK;
++		try {
++			return Response.status(status).entity(JSONUtils.toJSON(sri)).type(MediaType.APPLICATION_JSON).build();
++		} catch (JSONException e) {
++			logger.log(Level.WARNING, "Unable to serialize runtimes info", e);
++			return RestUtils.createErrorResponse(e);
++		} finally {
++			logger.exiting(EngineResource.class.getName(), "getRuntimesInfo");
++		}
++	}
++
++	@GET
++	@Path("status")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get current status", httpMethod = "GET", notes = "Retrieve status of the messaging subsystem and the internal thread manager", response = ODFStatus.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getStatus() throws IOException {
++		logger.entering(EngineResource.class.getName(), "getStatus");
++		try {
++			ODFStatus odfStatus = new ODFFactory().create().getEngineManager().getStatus();
++			return Response.status(Status.OK).entity(JSONUtils.toJSON(odfStatus)).type(MediaType.APPLICATION_JSON).build();
++		} catch (Exception exc) {
++			logger.log(Level.INFO, "An exception occurred while getting the request status", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
++	@GET
++	@Path("log")
++	@Produces(MediaType.TEXT_PLAIN)
++	@ApiOperation(value = "Get current application log", httpMethod = "GET", notes = "Retrieve logs of the ODF instance", response = String.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getLog(@QueryParam("numberOfLogs") Integer numberOfLogs, @QueryParam("logLevel") String logLevel) throws IOException {
++		logger.entering(EngineResource.class.getName(), "getLog");
++		try {
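++			// Default to Level.ALL if no logLevel parameter is given; Level.parse
++			// accepts level names (e.g. INFO, FINE) as well as integer values.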
++			Level level = Level.ALL;
++			if (logLevel != null) {
++				level = Level.parse(logLevel);
++			}
++			return Response.status(Status.OK).entity(REST_LOG_HANDLER.getFormattedCachedLog(numberOfLogs, level)).type(MediaType.TEXT_PLAIN).build();
++		} catch (Exception exc) {
++			logger.log(Level.INFO, "An exception occurred while getting the ODF log", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
++	@GET
++	@Path("version")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get the ODF build version", httpMethod = "GET", notes = "The version is of the form versionnumber-buildid, e.g., 0.1.0-154", response = ODFVersion.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getVersion() {
++		try {
++			ODFVersion version = new ODFFactory().create().getEngineManager().getVersion();
++			Status status = Status.OK;
++			return Response.status(status).entity(JSONUtils.toJSON(version)).type(MediaType.APPLICATION_JSON).build();
++		} catch (Exception exc) {
++			logger.log(Level.INFO, "An exception occurred while getting the version", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java
+new file mode 100755
+index 0000000..ef489a8
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java
+@@ -0,0 +1,100 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.util.logging.Logger;
++
++import javax.ws.rs.Consumes;
++import javax.ws.rs.POST;
++import javax.ws.rs.Path;
++import javax.ws.rs.Produces;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
++import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.importer.MetadataImportException;
++import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
++import org.apache.atlas.odf.json.JSONUtils;
++
++@Path("/import")
++public class ImportResource {
++	private Logger logger = Logger.getLogger(ImportResource.class.getName());
++
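++	/**
++	 * Imports JDBC table metadata into the metadata store. Expected request
++	 * body (values below are illustrative only):
++	 * <pre>
++	 * { "jdbcString": "jdbc:db2://host:50000/SAMPLE", "user": "db2user",
++	 *   "password": "secret", "database": "SAMPLE", "schema": "MYSCHEMA",
++	 *   "table": "MYTABLE" }
++	 * </pre>
++	 */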
++	@POST
++	@Consumes(MediaType.APPLICATION_JSON)
++	@Produces(MediaType.APPLICATION_JSON)
++	public Response doImport(String parameterString) {
++		logger.entering(ImportResource.class.getName(), "doImport");
++		try {
++			JSONObject parameter = new JSONObject(parameterString);
++
++			Object jdbcObj = parameter.get("jdbcString");
++			Object userObj = parameter.get("user");
++			Object passwordObj = parameter.get("password");
++			Object dbObj = parameter.get("database");
++			Object schemaObj = parameter.get("schema");
++			Object tableObj = parameter.get("table");
++
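++			// All six parameters are required: connection info (jdbcString, user,
++			// password) plus database/schema/table to select what to import.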
++			if (jdbcObj == null || userObj == null || passwordObj == null || dbObj == null || schemaObj == null || tableObj == null) {
++				return RestUtils.createErrorResponse("jdbcString, user, password, database, schema and table are required!");
++			}
++
++			String user = (String) userObj;
++			String password = (String) passwordObj;
++			String jdbcString = (String) jdbcObj;
++			String db = (String) dbObj;
++			String schema = (String) schemaObj;
++			String table = (String) tableObj;
++
++			JDBCMetadataImporter importer = new ODFFactory().create().getJDBCMetadataImporter();
++			JDBCConnection conn = new JDBCConnection();
++			conn.setJdbcConnectionString(jdbcString);
++			conn.setUser(user);
++			conn.setPassword(password);
++
++			JDBCMetadataImportResult result = null;
++			try {
++				result = importer.importTables(conn, db, schema, table);
++			} catch (MetadataImportException ex) {
++				return RestUtils.createErrorResponse(ex.getMessage());
++			}
++
++			if (result == null) {
++				return Response.serverError().build();
++			}
++
++			return Response.ok(JSONUtils.toJSON(result)).build();
++		} catch (JSONException e) {
++			return RestUtils.createErrorResponse(e.getMessage());
++		}
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java
+new file mode 100755
+index 0000000..9daf09a
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java
+@@ -0,0 +1,252 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import java.text.MessageFormat;
++import java.util.ArrayList;
++import java.util.Hashtable;
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import javax.ws.rs.GET;
++import javax.ws.rs.POST;
++import javax.ws.rs.Path;
++import javax.ws.rs.PathParam;
++import javax.ws.rs.Produces;
++import javax.ws.rs.QueryParam;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.wink.json4j.JSONException;
++import org.apache.wink.json4j.JSONObject;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.json.JSONUtils;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++@Path("/metadata")
++@Api(value = "/metadata", description = "Populate and query metadata repository", produces = MediaType.APPLICATION_JSON)
++public class MetadataResource {
++	private Logger logger = Logger.getLogger(MetadataResource.class.getName());
++
++	@GET
++	@Path("/connectiontest")
++	public Response testConnection() {
++		try {
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			MetadataStore.ConnectionStatus status = mds.testConnection();
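++			// Map the metadata store connection status to an HTTP status code.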
++			switch (status) {
++			case OK:
++				return Response.ok().build();
++			case AUTHORIZATION_FAILED:
++				return Response.status(Status.UNAUTHORIZED).build();
++			case UNREACHABLE:
++				return Response.status(Status.NOT_FOUND).build();
++			default:
++				return Response.status(Status.INTERNAL_SERVER_ERROR).build();
++			}
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "An exception occurred while testing the metadata store connection", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get metadata store properties", httpMethod = "GET", notes = "Retrieve type and URL of underlying metadata store", response = Response.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getMetadataStoreProperties() {
++		try {
++			JSONObject result = new JSONObject();
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
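++			// Flatten the store properties (e.g. type and URL) into a JSON object.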
++			Hashtable<Object, Object> propertyHashtable = (Hashtable<Object, Object>) mds.getProperties();
++			for (Object propKey : propertyHashtable.keySet()) {
++				result.put((String) propKey, (String) propertyHashtable.get(propKey));
++			}
++			String s = result.write();
++			return Response.ok(s).build();
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "An exception occurred while getting metadata store properties", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("/referencetypes")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Get list of available reference types", httpMethod = "GET", notes = "Retrieve list of supported metadata object reference types", responseContainer="List", response = String.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response getReferenceTypes() {
++		JSONObject result = new JSONObject();
++		List<String> referenceTypes = null;
++		try {
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			referenceTypes = mds.getReferenceTypes();
++			result = JSONUtils.toJSONObject(referenceTypes);
++			return Response.ok(result.write()).build();
++		} catch (JSONException e) {
++			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + referenceTypes);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("/asset/{assetReference}")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Retrieve asset by reference", httpMethod = "GET", notes = "Retrieve object from metadata repository", response = MetaDataObject.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response retrieveAsset(@ApiParam(value = "Metadata object reference id", required = true) @PathParam("assetReference") String assetReference) {
++		JSONObject result;
++		try {
++			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			MetaDataObject mdo = mds.retrieve(ref);
++			if (mdo != null) {
++				result = JSONUtils.toJSONObject(mdo);
++			} else {
++				// Return empty JSON document to indicate that the result should be null.
++				result = new JSONObject();
++			}
++			return Response.ok(result.write()).build();
++		} catch (JSONException e) {
++			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + assetReference);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("/asset/{assetReference}/{referenceType}")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Retrieve objects referenced by an asset", httpMethod = "GET", notes = "Retrieve referenced metadata objects by reference type", responseContainer="List", response = MetaDataObject.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response retrieveAssetReferences(
++			@ApiParam(value = "Metadata object reference", required = true) @PathParam("assetReference") String assetReference,
++			@ApiParam(value = "Reference type name (including 'PARENT' and 'CHILDREN')", required = true) @PathParam("referenceType") String referenceType) {
++		try {
++			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			List<MetaDataObject> referencedObjects = new ArrayList<MetaDataObject>();
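++			// 'PARENT' and 'CHILDREN' are pseudo reference types resolved via the
++			// parent/child hierarchy; any other type is passed to the store as-is.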
++			if (InternalMetaDataUtils.ODF_PARENT_REFERENCE.equals(referenceType.toUpperCase())) {
++				MetaDataObject parent = mds.getParent(mds.retrieve(ref));
++				if (parent != null) {
++					referencedObjects.add(parent);
++				}
++			} else if (InternalMetaDataUtils.ODF_CHILDREN_REFERENCE.equals(referenceType.toUpperCase())) {
++				referencedObjects = mds.getChildren(mds.retrieve(ref));
++			} else {
++				referencedObjects = mds.getReferences(referenceType.toUpperCase(), mds.retrieve(ref));
++			}
++			List<JSONObject> jsons = new ArrayList<JSONObject>();
++			for (MetaDataObject obj : referencedObjects) {
++				jsons.add(JSONUtils.toJSONObject(obj));
++			}
++			String result = JSONUtils.toJSON(jsons);
++			logger.log(Level.FINE, "Serialized JSON: {0}", result);
++			return Response.ok(result).build();
++		} catch (JSONException e) {
++			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + assetReference);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("/sampledata")
++	@ApiOperation(value = "Create sample data", httpMethod = "GET", notes = "Populate metadata repository with ODF sample metadata", response = Response.class)
++	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
++	public Response createSampleData() {
++		try {
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			mds.createSampleData();
++			return Response.ok().build();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while creating sample data", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++
++	@POST
++	@Path("/resetalldata")
++	public Response resetAllData() {
++		try {
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			mds.resetAllData();
++			return Response.ok().build();
++		} catch (Exception e) {
++			logger.log(Level.WARNING, "An exception occurred while resetting the metadata store", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@GET
++	@Path("/search")
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Query metadata repository", httpMethod = "GET", notes = "Search for objects in metadata repository", responseContainer="List", response = MetaDataObjectReference.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error") })
++	public Response search(@ApiParam(value = "Query to be sent to metadata repository (refer to Atlas query notation)", required = true) @QueryParam("query") String query,
++			@ApiParam(value = "Type of results to be returned, 'objects' vs. 'references'", required = false) @QueryParam("resulttype") String resultType) {
++		List<MetaDataObjectReference> queryResults;
++		try {
++			MetadataStore mds = new ODFFactory().create().getMetadataStore();
++			try {
++				queryResults = mds.search(query);
++			} catch(MetadataStoreException e) {
++				logger.log(Level.WARNING, MessageFormat.format("Error processing query ''{0}''.", query), e);
++				return Response.status(Status.BAD_REQUEST).build();
++			}
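++			// resulttype=references returns bare object references; otherwise each
++			// result is retrieved and serialized in full (slow, see FIXME below).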
++			List<JSONObject> jsons = new ArrayList<JSONObject>();
++			if ((resultType != null) && resultType.equals("references")) {
++				for (MetaDataObjectReference ref : queryResults) {
++					jsons.add(JSONUtils.toJSONObject(ref));
++				}
++			} else {
++				// TODO very slow, retrieve results in bulk ?!?
++				//FIXME serialization of each object on its own is necessary because of a jackson issue (https://github.com/FasterXML/jackson-databind/issues/336)
++				//this should be replaced by a custom objectmapper initialization, issue #59 in gitlab
++				for (MetaDataObjectReference ref : queryResults) {
++					MetaDataObject retrievedMdo = mds.retrieve(ref);
++					jsons.add(JSONUtils.toJSONObject(retrievedMdo));
++				}
++			}
++			String result = JSONUtils.toJSON(jsons);
++			logger.log(Level.FINE, "Serialized JSON: {0}", result);
++			return Response.ok(result).build();
++		} catch (Exception exc) {
++			logger.log(Level.WARNING, "An exception occurred while processing the search request", exc);
++			return RestUtils.createErrorResponse(exc);
++		}
++	}
++}
+diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java
+new file mode 100755
+index 0000000..e203774
+--- /dev/null
++++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java
+@@ -0,0 +1,128 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.admin.rest.resources;
++
++import io.swagger.annotations.Api;
++import io.swagger.annotations.ApiOperation;
++import io.swagger.annotations.ApiParam;
++import io.swagger.annotations.ApiResponse;
++import io.swagger.annotations.ApiResponses;
++
++import java.text.MessageFormat;
++import java.util.HashMap;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import javax.ws.rs.Consumes;
++import javax.ws.rs.POST;
++import javax.ws.rs.GET;
++import javax.ws.rs.PUT;
++import javax.ws.rs.Path;
++import javax.ws.rs.Produces;
++import javax.ws.rs.core.MediaType;
++import javax.ws.rs.core.Response;
++import javax.ws.rs.core.Response.Status;
++
++import org.apache.atlas.odf.admin.rest.RestUtils;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.settings.SettingsManager;
++import org.apache.atlas.odf.api.settings.validation.ValidationException;
++import org.apache.atlas.odf.json.JSONUtils;
++import org.apache.wink.json4j.JSONException;
++
++import org.apache.atlas.odf.api.ODFFactory;
++
++@Path("/settings")
++@Api(value = "/settings", description = "View or update the settings of the Open Discovery Framework", produces = MediaType.APPLICATION_JSON)
++public class SettingsResource {
++
++	private Logger logger = Logger.getLogger(SettingsResource.class.getName());
++
++	@GET
++	@Produces(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Retrieve settings", httpMethod = "GET", notes = "Retrieve current ODF settings", response = ODFSettings.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response getSettings() {
++		logger.entering(SettingsResource.class.getName(), "getSettings");
++		try {
++			return Response.ok(JSONUtils.toJSON(new ODFFactory().create().getSettingsManager().getODFSettingsHidePasswords()), MediaType.APPLICATION_JSON).build();
++		} catch (JSONException e) {
++			logger.log(Level.WARNING, "Unable to serialize ODF settings", e);
++			return RestUtils.createErrorResponse(e);
++		}
++	}
++
++	@POST
++	@Path("/reset")
++	@Produces(MediaType.APPLICATION_JSON)
++	@Consumes(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Reset settings", httpMethod = "POST", notes = "Reset ODF settings to the default", response = Response.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response resetSettings() {
++		logger.entering(SettingsResource.class.getName(), "resetSettings");
++		new ODFFactory().create().getSettingsManager().resetODFSettings();
++		return Response.ok().build();
++	}
++
++	@PUT
++	@Produces(MediaType.APPLICATION_JSON)
++	@Consumes(MediaType.APPLICATION_JSON)
++	@ApiOperation(value = "Update settings", httpMethod = "PUT", notes = "Update ODF settings", response = ODFSettings.class)
++	@ApiResponses(value = {
++			@ApiResponse(code = 200, message = "OK"),
++			@ApiResponse(code = 400, message = "Bad Request"),
++			@ApiResponse(code = 500, message = "Internal server error")
++	})
++	public Response changeSettings(@ApiParam(value = "ODF configuration options", required = true) ODFSettings odfConfig) {
++		logger.entering(SettingsResource.class.getName(), "changeSettings");
++		if (odfConfig == null) {
++			return Response.status(Status.BAD_REQUEST).entity("The body must be a valid settings JSON.").build();
++		}
++
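++		// The updated settings are validated per property and echoed back with
++		// passwords hidden.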
++		try {
++			SettingsManager config = new ODFFactory().create().getSettingsManager();
++			config.updateODFSettings(odfConfig);
++			return Response.ok(JSONUtils.toJSON(config.getODFSettingsHidePasswords())).build();
++		} catch (ValidationException e) {
++			logger.log(Level.WARNING, "Validation exception while setting property " + e.getProperty(), e);
++			return RestUtils.createErrorResponse(e);
++		} catch (JSONException e1) {
++			logger.log(Level.WARNING, "Invalid settings JSON in request body", e1);
++			return RestUtils.createErrorResponse(MessageFormat.format("The provided input is not valid JSON of the form {0}", getEmptyODFConfig()));
++		}
++	}
++
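++	// Builds an empty ODFSettings JSON document that is embedded in error
++	// messages to show clients the expected shape of the request body.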
++	private String getEmptyODFConfig() {
++		ODFSettings odf = new ODFSettings();
++		odf.setUserDefined(new HashMap<String, Object>());
++		String emptyJSON = "";
++		try {
++			emptyJSON = JSONUtils.toJSON(odf);
++		} catch (JSONException e2) {
++			logger.log(Level.WARNING, "Unable to serialize empty ODF settings", e2);
++		}
++		return emptyJSON;
++	}
++}
+diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png
+new file mode 100755
+index 0000000000000000000000000000000000000000..fabcc37211ac744ffcfaeda19ab8137c2d42d420
+GIT binary patch
+literal 322
+zcmeAS@N?(olHy`uVBq!ia0vp^3LwnE1|*BCs=fdz&H|6fVg?3oVGw3ym^DWNC@5Lt
+z8c`CQpH@<ySd_}(n3A8As^FQMn4TJxnwU~qcrw)nsOY?>i(^Q|tvAyd`I;4YTvvW{
+zu+?(E9k|1h{ghaeNL0>oq0Ar^uRr=*mU6i~al7@1?KVrqWd;UkA;o9%X9Rp2{;y~<
+z(ymvG3cH*1p~y#X<<YmB{7>HfnBi%Wa{Sb;_4A}06#Z`MD_&vbd-Tx2J-9)*&}Mf!
+zQ-E-mO*XU3=Ux}5V`0;?ZKM^ti{oV^7Q~4(-CHZ#(kIGPShSMq%arL>4PP1z87IhW
+zv}M@9yVLsWu7}GH9x>Hln>XXj4F<8d7BAO^*Zwk$cQN?SE6@KyEP-LQRic~3?kTr`
+PUSsfd^>bP0l+XkKh#hrk
+
+literal 0
+HcmV?d00001
+
+diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png
+new file mode 100755
+index 0000000000000000000000000000000000000000..1f3744b22d1ba7969d2e4422f87db6f8911b48e3
+GIT binary patch
+literal 467
+zcmV;^0WAKBP)<h;3K|Lk000e1NJLTq001BW001Be1^@s6b9#F800009a7bBm000XU
+z000XU0RWnu7ytkO8FWQhbW?9;ba!ELWdK2BZ(?O2No`?gWm08fWO;GPWjp`?0bof)
+zK~#9!)K`lUf-nrVPQV5-f+IKr8^{QZ!U(tlBj^UWLD_%}u=T=Su2)Ku2043|<4j4K
+zzLyW0d7e*4E|U|0G3IORe3-ut;4D12a$?bM>vf_CJOUhX%O5naNX7NXrT}h!S=k<)
+zzjD_cSa^r^fae_{YIe_oL(5Q`0to_*a8IWS0I@bw0j<QBGzV@d2_J!O_dZbKx)1t7
+z$@V7*pn=0Cq?<t3{2~AghjmETfzF0#sRrQIHP&_m8m^$L0hvxSfHs6bLW4)6M*&SK
+zB&6^^A?vXWk<uKtXpq9cB@;CvMerjeuvkPxt6l{Z=cydY3Luu{oGQ_1#TA-%L!ztt
+zy)m%KZ^fKW7weQ+6mWZeI4d+LSfG4614}^Ezw;*3uyLCL-KG%?EH6b40{8|!W6Jj{
+zKG>gRi}8yBe?Y^FL<2GVYr)vADb+y9fqIL_l&$(dBklDOU;qMEXIu=+D2xCA002ov
+JPDHLkV1hC*x4HlT
+
+literal 0
+HcmV?d00001
+
+diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png
+new file mode 100755
+index 0000000000000000000000000000000000000000..59a7ff85ede850df81a1fc9dd870cee678137cf9
+GIT binary patch
+literal 224
+zcmeAS@N?(olHy`uVBq!ia0vp^3LwnE1|*BCs=fdz&H|6fVg?3oVGw3ym^DWNC@5Lt
+z8c`CQpH@<ySd_}(n3A8As^FQMn4TJxnwU~qcrw)ns3_Od#WAGf)|+rgJ_Z4v;E(_H
+z`|r96h-9AkD4i>HM#n&qSyHKhosnUg(1r<VW?Q|&?uq_c@9=ipX18gam)A8{_wMR-
+z$Tz>U{J<)?`f$cd2d+7OuDh5%>^vae|LjM7;*lRScD=fgp~jTM6xqPYz_!%QTdQ&7
+QUZBMcp00i_>zopr0L39oCIA2c
+
+literal 0
+HcmV?d00001
+
+diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png
+new file mode 100755
+index 0000000000000000000000000000000000000000..4b9bcd31266b5ef1bf6d7719d603a22cd22d467e
+GIT binary patch
+literal 562
+zcmV-20?qx2P)<h;3K|Lk000e1NJLTq001BW001Be1^@s6b9#F800009a7bBm000XU
+z000XU0RWnu7ytkO8FWQhbW?9;ba!ELWdK2BZ(?O2No`?gWm08fWO;GPWjp`?0l!H^
+zK~#9!)K}|q!ypVUz5cI9U<8lg5i&wX@CJ1^@CX^fBhU>zg0lggz?JUAoucp%4!yM8
+z=}16GA3`siecxZ6IbLg+jWHHz2HJpjK^yJa$hjV9i3&vvln6!#bU}6{A#X=#5s(k-
+zcd`}Ad!UU8M`1J2AZQkR_`4?=9Q;WQK%hsO!p$;gCIQyc`Wk@M09eJ1I^~lM4EV~{
+zf~<2905Y)yBY{~fK%AvQ7><J~0SNEum?PGLaNO+p{tT2&0wBi=jDv^<H3(t$;E*@Y
+zn92hK%~xP@EMz^~a}e8IE^pA;EB!su{6%x0OGH8N77<znA|~T-CVT)yTL3LVlmOTU
+zAbOg;x)!9OpinFBqC*n}>O61+#Nj-U%4C%w#)mSYs0Cj06buBfipOd}sv0Vkx2uMU
+zeqhV=1HM^5K#J!E9T7h_t2!c59&WRU_DXg>(lVBTUfBK;)s?ZpMZD#ttboA}e9R(}
+zOs*}5%!YzK*wQ!M%0-HdC11c;e`o8TpkvCVQ%xk)yg$FSEy~x#HUl!!?R`X{r@c|5
+zSoH*d?#t*m##`m5*{<vr%)ha~(*Zo)34a6_03qXnO$-^iNdN!<07*qoM6N<$f<Jla
+AmjD0&
+
+literal 0
+HcmV?d00001
+
+diff --git a/odf/odf-web/src/main/webapp/.gitignore b/odf/odf-web/src/main/webapp/.gitignore
+new file mode 100755
+index 0000000..c846d68
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/.gitignore
+@@ -0,0 +1,6 @@
++/odf-web.js
++/odf-web.js.map
++/odf-client.js
++/odf-client.js.map
++resources
++resources/**
+diff --git a/odf/odf-web/src/main/webapp/WEB-INF/web.xml b/odf/odf-web/src/main/webapp/WEB-INF/web.xml
+new file mode 100755
+index 0000000..9e16b0d
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/WEB-INF/web.xml
+@@ -0,0 +1,52 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<web-app id="WebApp_ID" version="3.0"
++	xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++	xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
++	<display-name>odf-admin</display-name>
++	<servlet>
++		<servlet-name>odf-admin-servlet</servlet-name>
++		<servlet-class>org.glassfish.jersey.servlet.ServletContainer</servlet-class>
++		<init-param>
++			<param-name>javax.ws.rs.Application</param-name>
++			<param-value>org.apache.atlas.odf.admin.rest.ODFAdminApp</param-value>
++		</init-param>
++		<load-on-startup>1</load-on-startup>
++		<enabled>true</enabled>
++		<async-supported>false</async-supported>
++	</servlet>
++
++	<servlet-mapping>
++		<servlet-name>odf-admin-servlet</servlet-name>
++		<url-pattern>/odf/api/v1/*</url-pattern>
++	</servlet-mapping>
++
++	<security-constraint>
++		<web-resource-collection>
++			<web-resource-name>Secure resources</web-resource-name>
++			<url-pattern>/*</url-pattern>
++		</web-resource-collection>
++		<auth-constraint>
++			<role-name>admin</role-name>
++			<role-name>user</role-name>
++			<role-name>moderator</role-name>
++		</auth-constraint>
++	</security-constraint>
++	<login-config>
++		<auth-method>BASIC</auth-method>
++		<realm-name>ODF Realm</realm-name>
++	</login-config>
++</web-app>
+diff --git a/odf/odf-web/src/main/webapp/client_index.html b/odf/odf-web/src/main/webapp/client_index.html
+new file mode 100755
+index 0000000..8af76c0
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/client_index.html
+@@ -0,0 +1,14 @@
++<!doctype html>
++<html>
++<head>
++  <meta charset="utf-8">
++  <title>Data lake application</title>
++</head>
++<body>
++   <div id="odf-toplevel-div" class="container-fluid">
++     Loading...
++   </div>
++   <script type="text/javascript" src="odf-config.js"></script>
++   <script type="text/javascript" src="odf-client.js"></script>
++</body>
++</html>
+diff --git a/odf/odf-web/src/main/webapp/img/lg_proc.gif b/odf/odf-web/src/main/webapp/img/lg_proc.gif
+new file mode 100755
+index 0000000000000000000000000000000000000000..7dd40efdf2290b9c96c55fd773083b559a0c05bb
+GIT binary patch
+literal 17230
+zcmaHySzJ<!_xAT@Mi4<!aReN2Ry6aNQ;2hFIc1inI8;_vSe>$R0K_3PLo0`}#4$57
+zoVBa~=b=KI%TriZR#w<(!^uDW-s|_=?GG3GX7gRov(|c+Ux2Ukig*XW0eG1O49%2H
+zJ^w5mYE*pvGX3W3=kGuN{r$V^;r`m8XJ>}<-hO&D@Oa_e<c6xD<MR1$mBVRYzJEDA
+zyyetz)Twdi<vZ{1zv=3m=3N+n+k7XfcIff*_unK>3-7*oHSp-=nF+tzp*xE|KYsZ!
+zUp;c_{pYWeM>ikOU!Rz*UikXs^~bL@L(;cjrbnNAtR0%G85)1R@csSg@12wHYKE^!
+zr`w(@CcgYw=)B!qJ=FH-_2cf_cl+)Sif+!|kbnB|>))xl=Gt3fJ@?MljpcXWEBW~C
+z>yKZHim&fqEXWprE>1k_yf8ld^1~PTyL;b${JQaM=;PwYnRl<=fB9H9en5CLxb@zS
+z*Pre-jZMox4F3A}=fdL4`A=VN&fLFp_gTfr>JHgV-Hq+@pBB%JyngWNOY_9-3Hghc
+zip3A#W<)n1UYts+AC@<bk6*ZPsbQ?I`9}Z68<$@wrW;1a>PIhKyZ@p2=9yQD4-F&A
+zUnUzyCBmVqR@sn9*4`{Dc>8IgV#HVcKv6d8RzBvww6sKB!mRd*2>0>~@o})FLqPxl
+zsNZ2|7?1^6E1#7g$pHY6OPnlTHa{oZhZg3Mk(pVT>4BtK=AJAq%U%Z<9xci!Ja#6-
+z16Wsn?xbbd*`w7}XRC{}8XC`0w2rk3MMYOd0Hw3kr?Odi>0)cmRSzF&4o&0QXy3@y
+z-rh{wEuLQVA^D**M@cjvNj4>)BpRvGx&^35fBq~FrFE3AqYXZ9>jb<73<1SmV~)?I
+z7vIou3-UAaKjAOKn(oYw5M6nhe7fFAjk&rIz$R8l50O?ZOEOtAQbtHB@8FN0$>V1i
+znfNO{awjSrPrB>hNjf(PODzOCVUX+#9@_BHWSP8?S6>#<@y0aA{O%>ya@HVmM;UoT
+z(0o4Lb1i?WHE5|e{UD4HqU)M=w;<=xD9lbR>obZ}*^Pc0xQ@oMD%m7Tbj!29c<}Bp
+z46Rp=+;Vt&|8}y)cKVUh{sSaCvVGXF_*d8G(M=g)bs+}*ou>OUlis%Mcq}*(<FTPb
+zcBZDgTE|2GK)F%hfz;>xe7r^I3zefUZyaD~Cvp6b(;O)b^H0@%?5W2W({>p>$uIq=
+zSdp@nIO}={<qJvTEOE!vQBMyeC3y2BmOPV81UBvm3$}>K5&Fq2K&E5Q$lmpioy(6%
+z-f|j*Vkvz5GgYcTh^k8)$|XM9T%B*iR0t`=y5#3Nc0HR1HK;=m9+B-RV%|}dSUwX^
+z`AT}eRG4iomL7Mq6<6Sm&DV>IvlWWL<1t1eX*`n^YFg+J`3zDPli3O>rRm@s%GVYp
+z51N9)?}OUf;#5e88;c96Ol{kme|*ie16&*(Pcf$<Dg`i=tRlJ;ZDt*csZM*tF^8u+
+zvtC=cIiDJ+HBYmbV(iinT3{-6t^)PbVL}Ny=y$wW-7Z<qLFk&;BkE#>O_aaDBo!!@
+z*f%x|v1%510Wu!d2E@%jdP}lBaq3Nwt(arpCbAXJ<8Jw45a68+u81z%K2amx4aRP6
+zd*%||(($Tc{Ox61naX{2Q0BU`7Yw>^36a<{{!pFO9CE4s<;&=I&7Gx@990Ah#=g{n
+zGx=pp<Ng;RJW7%#fDAJvX%vyd^w#E<mX-4&*J*~4GBq-5GBRp1>oTxFc*Vui9kk8S
+z(T5u98_w1ChG|k;F0S+2NKjXAKUCe*Rok5zcJg}ZDo?`QfuWAhbM<vy(={HnNj^SE
+z?UmZS!3Q3pVWA$!x*PvFhrBlF>wrejy}rA7;&UUT0foYcM+Cj5!cVqDf6H86Jjo%i
+z+#6EVNSZ1rJiKOnbwKM>Q$P{I+=d41o-N>}W*wxDbY8p#7L{RVMaE3;G1$sSViQN7
+zv9>V4c6~2ZToisnwcMPv0R`inY7lDf4)bD!pEF7d=}eNp=?cdxRG$_1=c--`UJ8)w
+zZ%@u{^U#aM?N;x97Y#0_$gaX5CbpV<_Q2fRuH562@`yCyL$mV~szsRfzC+n2vw^hT
+z#UHuHyf)vke~|V&#e#qAU`9}%_3dYS3$2@%ZG3UhZwzm3!>ckB=!NH-*SPcL3ToO8
+z{lR6+JJx!R=e?Ay@E#`W-bdKpzjV8}{ogRn*~d8cDBc5G`muB*ZSiH|YQ$IaTwh*X
+z{>0mzp_QrRLlH!A9M#M5jw&<;l}6Tf)At|2W7C&`>L?1EtZC=(jM7<l^cfeYW5gcP
+zk4-brS6TKC4<uWkNJD^@zp3m0%3z7S3Qb!=64fT0Ye9_n=8>FObwkC`B{HC3gT?kn
+zlhu5L`T21yLJV#y^jMYbjb8qpf&!!4g#BoBC(sj)q_f3Xo^>_!L|i%n$U<N}y)_(T
+zQU@clVT@PWE<wF#G=t~STmruN>o7V_q!@&|%!!B9AL{^~8kJ1>`P!^`=dJZF)9Wi6
+z4mG_#Psmj;B#ra|6-0@^lP(6%*s3&9Vq@5z&BPD@Nh07j=MdKXf&Q#h0h)AOO0m`_
+z&U0QyxuJ^Z+a_#J9R@*vD{C^w8ZRbwTu$%-jZH4hw;gHNK8kZGjD8QX;@Be^o_-YD
+zYDG<t?dZF%zytcT&#p*uuFZH3)Xth}uZFOm`EUYlT*!flb*@Kq2p<!qP!j6-I2$mp
+z#pXh-k5zLK0m&Tckj^oR2u}=O?%R*~Aao$H@OZ~@J2U%s?btibDd3f&JrI(W^RGQJ
+z7+&=g$<NIFPjMhP1R?qu48Q~G?l_rP_f9%t*B8RM|JY`R2L(emb|P?f?ztG-J~s0v
+z(sm~2k=>!mZ|Gfmisk<eYL=I8u<0LZ+o+`k6=~{Dq2``CeHsz066BDoM)<>G^dXJR
+z%nZXYfQFz{cIBMi5wtQDt#+Mo?tIh5jEjaDTJApGH&jg0j8xHguGdMMng=gtdSKme
+zoW}pP9I|if_TBsEq6Z#Nx9H8}95hLRyqtS|{lJ0Eo|`YT9z2(v^h_MWC7t?alBcS-
+zIA6J)VsukYXBE*r)U!6fz|!5?-{RhIlsz_FY9k>S5B2BoQqV)0NyksFOkz40@MI@j
+z&6n+5m=n~z2n2VPn{((NZk@0+e)|2ewD5SjmPNtkj^gLJ#SQ%2DD!|W&D@+G=MHhx
+z+F&gzYiT*C*vjU)c?C$DH|ZG%#;kVlshAJ1m&w$~_cohKK7VL1gwNPhlEk3qz@bzk
+zJ7pZw7io(6dxYa`ue+jhNAZjWsb8oO;UM_U@C$kq%{{a_glaC<d6+*g?8!Ld`OpLS
+zt>AppOUpYwuwt$7@o*kj!yiu(1e*rzjgC|~^?A>x!hNvooz~xXz7@Yx0i%+a{*eHO
+zUMos$1S`oC8LPuZ3F?S+k@%@j|G(z~P;S%#Kp%1T8;^GiI|E&RH<Tqzj|=jzrVdpQ
+z;aI&k&H$RsOB1N+?o8w6S*Roc+glbzRou83IkPaE&%yo`IxsBNUZF0orTBPLb(TP{
+zST+~us{b9CgfiX3EL!u1!Od{n8B*d&`vrQH(#4#FLdTfJda~xP)@+>1i2zT)URUtA
+z%p>We6uvtQ7(Phm$z#`Oji8WNqKQgg7CDk4w9??y@}c=s5J^r?^Q7vKLd<+@c-H$Z
+z8Zh(X{wzcaBg{&UXNv1F)*`8j?~_-FYcg5voOmk*^{t9)vm7WJ(DCG_@&Y#GYg>(6
+z?r-lnd+L<tkkh1Bwjn_0(@x&_kph783_zHw!-B)S@sr*fn*Q;gyDJq8KqN;1xmWG1
+zJ?Cv+(x&*AgGruH0#(>~xX+4%8Iod-%}CYHy?4#;y`wTY(5~^8DQqRkmM3x$u5>H{
+zG|>=pwZAh{;A+-(p&UFDs~7;Zw)eFTbf-z!qqxt_T&TIp3|z~Ycyw@LS-&;^1|B<^
+z8g6zXh;vIfS-`&=V8Vr~cpsw!H&*RRf#3XOZJ&z>cMbVMN)*b!%6tx~ex1UTB=!%3
+z7ed5n-NG5*G46_dQ0|kZy6>O;PrC=uW-wc46}vt<<hdEDl|PpDbTfItZ2q4Eugcm}
+z#B(e-<UcHFmhbM>LCYOdS2B*R^mH&?=9q=}AIJJDQg%W18L6tG(P~YEjT<-9d?=Oe
+zr&sT{cXEQ%)}3p-Ku`jX)ze(pF&brJVbOEBuc7h08bKmGq%^n7sH3L>6Krg5*x#C*
+zx^qwcfwB+`QY)VF%r}sqzj#%<JoV^G^v5WSQ;PJ8hoKX(`r9z3Ol@&wMgPfcZ!LIm
+zK8eEg@;t2$2=_j8nAli#9J@LFki--^kUE<%oht1|*=TB7(??#uKSM^WZJu~GIrU;(
+zE7^JC=VtjTxCP?p=U?j+?iDQ4DSY;p!lzbKEuGQ?tL~{Gl1`y0;%z62NS976G0W*&
+z+m6B6O14JcgkjZsPGJESyW~C5%PMMa)8lvxIWzL0_m{(xg~N6Q_czO*Ev8mef%LQU
+z;eX2@S{1iKJZBC>qR7a9z6-wO4a4soB*2n`FARI0&pe1S;<zj;G40#3Uc!ic1FmNm
+zSUoAOjD%-<=roFK{vAK^rkz3mu%fyEGcg#y7qMo91DAgQV2pp^%l`g)=B0Ec`^R|B
+zlCFF>fM6JuBZ2(hD9pyiqI%Fe9*)C+zKU%lbT>{H!X(yi^{U3v#SBkSN4ITymxlsO
+z$WLcQJ)=<R^5agJe1FMNlWWc+c=*@Zew>rERlMItrG|OzgFgXSg0mgb!z3RS2QO}t
+z0Rw=iD_Q|EtNE2MlLpFprI-^BHx(oNL!-zqY+9KWyWYza4*M0|^CLPcfFYIi_aY|8
+z<rGjEL1NW-R=LjO#gz~qK7OP0-&AHz0Ya1wjg9<k-J1Oevkyb9V3elD6)Mg?#AT0s
+zM$~J0pf7;?mGg9W&edgHm7E?t=R)6Q%J#o0DT>DddZYvZyu$P3v|Ur0(leRfGh3-<
+z=7HkQ`%dGhqptUcdz#tCycPF6FA8NebOcdzlHE1O(Q3BN3~`UHv(QuB&YIiO`u7m+
+zB(dY_+nlSq)29cnnpX<#E=NYi!w5X5ve}MylH8|Jl9=S?s~#ZaNKtU2xPK&4!5YNl
+z8G>TM>4IEP2dfjp8L~~Ax7RXf*+aCTZdCrQHIBuh8oorrOtO|-X?g8>LfW-Mj#0j#
+zpdtSb08rG$<uJzx{NVj!$Y#!jS=uk@ov=ikfk#Zo;*XE<*i;`hai!B&b+bOU)7@?D
+zHn8bk7*iqW#*YyLOv>+X&n)!f2k^WyXD7A5koP^$-54HDG5_tB1I_gloVVNCrz|fH
+z`b*`1ZrKfI5N}guVeyAk$i985sVjrLy(&aGfw&D3WU{cPw*K7zsAf3qXl;cXpmcP)
+zdyDEC_FWUB)zm`#9EZwS{-`YzH*BhJ^-W!u+#?V?eGrec-K^qJV>3O^YqsrQo_2Na
+zu3__W*&qy+7Pu&C-10%|41I&MvyKACgTPmp|H?nUngv6vcSWJ%3-hEoPfIgU{nZR>
+zc+khjpQPQl{G$+BB1T19VY$YKE(y$mJznabxidjUv}eoJWgAMw#!2<Tf@6NgKOS;M
+z$8#F6uCOz!{`hB&UyBQ}{wZ}*@KR_CDX>(A2mJ1SUC_u0qC#Dt*~W-#-<w+Lxix;d
+zeePJ8r`<KtBC`UXW14Hv6_k{wR-rW!;O5jB$R>kWhb>J!?!q%Da*4<Zy}S>&TY)W4
+z?z?~Nm@Er5NAjrT1x}YoEV8eD;|MjiS^n1n!~vR>yW6s__-8PE60xp-MZEiMO~h5b
+zTO{aN=A!0{`1PzO=a3{7wp?z8UwIsT_0SFxlnna_`fdAnV;Fqb(RjKXg8%3l%PSQ~
+zPHzZxL(SlZLl@T(AAN7_|I>T{q?`lE{@YuEMg1zSeNkr$a`=>-*lUB0iNM~;#_5Di
+zI$n$eiM`Oxj<_)IrNkC{22kv~x_p+rwx>Z0kw`=%NUlBfdJ1iC!##u@Ok_#|lNB$>
+zVe^C0@WAo|G|*zod2sjD??N63`~3P9#+}xqw<iqsj8~Q65@MFK&iZ~rblX-22YNys
+zXe?N=xtE7rhf3gR9}jyYCi&CS&Yg@tx|Rdg?RaFV!hP1IbNq3zXFh0`OdlczBnt-W
+zUvOFA#X?Y43);?K-mV@1Y&rw##XO({<XRUuW6YCI^YN|H#6i*Pg-vfw;jGi|dZ7xL
+zjkOOOF&j&pjcn2WoDly31&^9}r%9vZpT%l5zkQ94?W0R2C^gnVO_}0|VQZmeb0G-t
+zW3daQYv!nYyl>P@Y6wDch*OLG_%+HJVV%m8Ks)AlkFUW`4on0$%4kK-hX6)O_#fqj
+zrdW629sly=TlvYxD7gaN(%qmJW}_o4IO)f(uro$m2G!vVRmpu;)<53poo_S;K~u#q
+zYc*dIVc!EslY3D_t-9b^FcLet?{S>of*;(SSlwm+pLk~L!dzpHD5;BgPSD@&`v=b~
+zH#1kFk}sW_BAa?8q}xnZM^?1tCR)YCp@cO$MdXG?CAeExtUh_EBG5I?+Pbr|w(g4k
+z)juq+2p%X!*s@1QJGWH#+&m|{c6;AGqtqP__->%fKTo>$^lWl=PM)27^;$JGXhVE-
+z8&J`=DW)SopakZ_U5P+Ak1}~TPVX_&b@Uf75UL5V^%E}nNyvhA__jLFbE%0u<jUOm
+zXRX<TIj0eS!_p>i0tDJt$KC1Z+YB20ufOHLAj3J6`?86@*x!ZQ_{QZiAu<q}i!+GZ
+zr{HWG;b4m*jrWGeZ$*<y#HGv99z4tXVVgsfhjIkOt2p*v96}088@_!dH+S}hm8Vn-
+z$XiOAfu8#)*W5>lh7kCz&&|UTI{1lmjyamKj$6XO`h`;WLr!jA+Olu)9uxJwBX`W@
+zzje?2B_MdS1|iQe``l)aT6mY)T>2SAueJ~u7+AF(dw8QyaQMFDU;ppn@*lW)uyk+S
+z4HlFAdRbpH3J<t)zqHi#sl~=m;C|fe<;2lwL9#q@#^+Wp6u7rv$|;}V$hjGG20$Y5
+ziVgW?L_#bZn^aESum8K6r-tIC#pmKqBnP0vLgw<2I6TXkq$`LrLZDYWBf#a5^JkCn
+zNqC<$bkU4>sbo1WppKF>f8UR2n=Gz_uJ%|z$hBCN%0sYtqqmRq$_4TYJ?m--3A>f8
+zUXc|gNMO4w<dD-!lDs$D#!AF|$qmIYCUJUHmhHVE9#WF&O@JJ*6;jUEK~APUJ2bVP
+zk`KnF*`N2a)n(DjG8aRn6!_jZaHUpB%re*h>aBu%<$vXAD|<?~7Idyr2W6k>EIH4&
+zUXWsX-&pc5C9RL=a@~K#OKN?zI4NyW*2B|yy%7Iuwf>WTO95yh<Glu+{!;~|M|atz
+zxeQH(^?p_fwTSQ{R+G<5K>3*ZM?}z04d2G-d==UvyP;5zBQF2}q}vC;8=s;mBciKD
+z=Q;)$ST3H7nH(6=^kVs-k~-9-#PA6gkC-(!@&cyc-4BI1d(j<6z06Y?WJ4CPhb_zW
+zr$TAY)6IkxZ>M1}$0(Wc`N^<<@`%ew>4(9fzf?U5ehF`O++{!+7q=mzCYRszVe7oB
+zM6H%f5I&4Xr<>~(b57HS`hZw-)t+RrJjuVBT}xt#0YFn67)kjLAIuB1({)Y9XcOaz
+z{ul$RfY`v?ic_bv>{vv)wso9!y)_E8WsA<9m<{@f%C;Q0SkR@R_go`N=b8>#;=H=O
+z4FLz)^>w{{=dX(;Hn&X<4ptxvw6$Fx$4%T+THxCjFW39t&xXT<bh^twPwV|rbpH(z
+z8=l{`d1+yvFe}=(Bfx<~oZj)givoalcCm^ydq_?>&Uvf?L+bZc<+Z7Bxcx|+euxW8
+zftEj8V;u}Hb(XZ+3sv<X+uYFT&0uW_`a3>tp{*Rm{q6FEC?=}$gAu6aDEeKqN`u(6
+zMq?BHKv=`R`H=^+34=wN$r(IJ$K8<9zgSD}@{`%d8pi<FkME;I*_?Y_xy?xg{Jx#p
+z&W_ll4++4B^{QE*o`cwXXZARHNZsK<&w|SJk`;HwmBJ35=R*hwyq)I9l~v{sYz=Xd
+zWRu@UzI|ek=xWyMB)f4+9&j*_%WM6<o*7-G|5$3QpSmKK^W|=A`FMiSFHb&d^KXTW
+zWzd`b;*shk=h?r9%J63TBl{Vwpvlh_y!2xfPv&85Tr%aU*SDE<BZQ}4y5Yd#WC{hm
+zq|+Fn_bWg&XoyU+A0pwliA(vpG1YRzLoDG8DZn}3FrSBwrykBsQ}8gh!h9yz`ZocB
+zpnoL{7TO90-s)Cu$-uEplkc@l+UAZGup;XzyiZBSXL|`cmi{oQm{KAEPAaitl*Y2S
+z(!3Qa4*jsTKPK~t`5{RS_^mYv&glq4mKaSu${9q3=q7XEJ7{Pz*{qT-tubaFCZn*Q
+zx#y6kze1I|L)VQz<t}<igIeaB@$t|TKfHyHJWnN^^+JV-8yh?})uBu7J@SKRE9jQz
+z>s?gzbPOZbTR|_X?4D?;cj7iBQKICVtCz`)U@(z7C~1pU1;wzm%Lf=VUj}1`MsU=T
+z9rlzI`iu6=AO^ze8Ixh#d;f<o0w`&FJB&E3z}xkDN9pOJmg$RMHkGU*K+I+eTrUxJ
+zM5N}pP#Rgx_BvGe5ead~(b|?^>MdaqX@XREQ)C1PAf{hlc>{)5ZfEo>J#Wf2tUka%
+zTTU?OI%^}85QnC5*wtZuhmL|?4B^G`?`?;<Nbf{hp4yI=7yc#!=2uT@Qe@k<-;)|%
+z+*P9C9}1$Xbr`<)(n_yx>t=nG0DxtbWSWS)Gq$t(@B7N);UhX4r>W)7UEa-Ye+@+-
+zKQnw=)pdnZA}vb9RYS6GiV<4nbgzjW|J~2R17WyyE20bCn$C8?fV#SNfBIQB1P;U$
+zxx{H(pKCmKj149tDn+?wx-51&ov1`oTRZOj`M5ZL*F?A0R$I`Ojp-uxUb%YxJnG)P
+zEis_krIB1+D_6|(^mKNQ%WLa~bNAmXi-AGFKTopUa5Jv1R+ty`?nv>>UpQPz_5%m=
+zn30_9t1A!hgGF`iiZMmvR4UCCuLjf&6UFm$;!V|3HS?A1#$k(4fUZ%t)<`H3+n4f7
+zi84xV;MfV9Ip@Qj17LYA9oh^$U#PxevOy|L@$B>=t*FEnQW?N&2c+{m9g>6^bXUtF
+zOwJL#&VjaUT#CP(*0c4ne*4x>C9G%^*|)`S+2jioWbhMGemm)74fs475{u|}5L4j^
+z15WSVsIZSyaR*BhHyw_z-Me)#JGM~g`9n#GthG>JZX?{MdJPfNyn1&4Jtb=Q)E&c2
+zFPXab<pbHjRmc5{9__2SkKYS{7uW}sRI-ZZefPQ=N&ou(NqXeVe0;CAZV4?t8)0;U
+zWWMXk<%3B<Pu^~kKKD8B_VGW+aaAKnftc%HJww)t5-Lt{D$QAPE_$F^jxwgFVfm*?
+z-~d<6O_1kH+T26UI|UFD@{MsJ&rp%Vo&ZBDU%&@s_T<MGI%p*@3<<EU&()c;&SDL>
+zug)V&_z;wir?$85_kqK!etUXr6a0nsI&Q>@=Ni5dEBp!TxAhK!k!cmiu-B<+d|32A
+z?=S+v06dGKsL2KnqMZ85P$MFqL)fpJ@o0GE>qxZ@0dMlilwY2WXM*vP%_nI=in&@3
+zR6@=fyd!m6i8*dY(vYzj&!c3AZLo-Q*eUmG>WH-;IoDETmPlxydXq=w-$7dve9Z}v
+zyhs-PZEZ#neemGEsSpSdmZE6pXnn4>X-ASA%vG1s1#d2O?cU<kHQu0`qjHMG=tn&F
+zI4H&zOZVjv8$DbkO{Ym5Zr3#j7DT7+Q8>TnwC#4!tEWE;EyTwTvXxMp6a@96Ov~Ap
+z1CLgP+FI)+KYH7&L1#16)}GkDU{!pRo=rrZv-;4gJziib)>xE75wr-I7kt^T6e&nb
+zqqkgoH1$0MMf3N3+vR^qo=tZ7HUqsgF*I(s5kZWX+%bMSzx|TmE*DRtrg$@;<F1b%
+zY(tkB?7K{g8aF^$ma{;p?|7x6P2;nKqvlTZY(Gd5g!-uIEeiI?xjz@WkXV0^f%f^;
+zYL*f?;GA>I9bj>v0E_8^?OKmDQ2(LI*-At~5R-1jcC`X=W`Vh7<rTa@Tb5PT(R4PQ
+z=%VdnO{cT$5T`EXAk1)}>k-#t%uzOxNN=!~(5-AmqQJwfvF<-4H^fQe;#}>9%JX5c
+zv4YMU>9?9DB`@f(mIpjUEbj62cqgW-xUetI<<5(rR%UMyc8<wxc~SI50M<v0+RZ~A
+z(||kTqkt_{KF+^cts2g<GY-gD>#YY-h#_s5dAJcLHTZb5J>6BEEeqSo1^go#1{%+E
+zFxrVFSQCMF-DOW(d8Ma_=$X50LmKND7XlBd*4<#Rd~bqZ?j_bTm_YX}U;Bb*d&A>x
+z{@9Zuoux56y<;|~tev^NZFv$SC|8XG?y>*jckc*cOK95qD`hREV(toj(;-se+P0(i
+zrjmu~oL5?#5Th>k3H%q3ij6Mce;wcp^PsB|B~s@v^$yL^`43G@y0%ig;!^!RE1h+r
+z2$j#a?$T9?vxABLW&K|MAHWAy*T<+=4`~_Vo7K`Fo>v<5oTkp*3f=fiW8$T%`+?dg
+zzN*GEmt2o!y+-SSfC`%on^|rz&itk65XXV^amg@*Q)o0V3tOCPKzc+4xOgWjH76&E
+zi_A63#}8*4I&mraS$HZFs@j)k#I<Na)f}Nq7oi2g_;}--Rm#F3K#wRWnci5*vn5Ip
+zeuarfqznKF5l*djlvFoied#I^AkhT;mD)kIeh9-ydjN1%%?+~Q(;4NWS;AIdJijDq
+zaWap5&=-W{QyBz_Fx!yoe|Fc$*M4fNXlbzO?3NYz#Dlak1{&Q&ku+3}t?)+|ynmfX
+z+;qw>TPM1%$x?@pO$DmhKUyVKk9G*&81}pe89Uap1aqe~q7=r_h!oK<Y{c*gc#$6!
+zDmlykYA->A*J()+8s<&OniWxiRkH?5{4UmqC4AML2?zsFPtIa4B7q?LcpBofm~*AT
+zP;5=3`rC6GpZbCfLj7h3IDqy3`L1n<BjR?Qd_M{xXrJkWX;><s-cR)TEWgkQT;J*`
+z5%F2ffv4A8xkyqPe(>tavwH1ht@$H2uwe|$_L0`yrcb@tw993q9%%r5?9m(SyVGG4
+z^bety<$^t94t7W1gPIR2E5LOTY)-GQnTmZCv3=DHeqF?5KSGO6@gaKnQwKq4yXJqw
+z=w_FQV-<p+E6CD?|4A?8nHAt*utHXmtCbZ7V+9K=Z#tQusH=;sJS(iOsZVDU!4nsn
+z5N5WCEuyxn&N@twQfxuDtEKt2lK$2+M~Az6CtQfOvYbG>SaV6hrF%owwWGb`N<EGP
+z5%Mg!-E(vE#8)C+yV5&)p98&sP2+n9eo7N0;<e9W3n_U<hX*xoMcAZ@&1RTds^r)W
+zyPpl@6QaJFuAuJLRD&pNN1!_D8fsMN>Kbo*qgr=Qv&E4_|Kn#(t(fey13Vx9@hZ1{
+z`1<dgf=r?C65Pnc&*w~`Rt>y9K@<L1v-2L6g@dDF2#me7)x{(UfWR(BSG5^*z!1|t
+zaea7)gumWtR2{N9u;P=pZ4USE4-LypNXNOr7KeWKJxrNEy7f!s%RtlY*Vrxl?igdM
+z1CE0y{JK@yj~vyZ#nIQbZ1m<T)y};#`JQrrIsZv%Z+aBY4cLA^IKlY(829^bn_H*x
+ztq%W|=)2gIm$e4=FDE>?UDmTza!on1Th|mp-F@v)$Cr1niHE0#j~&yQqYir+SP&oZ
+z^st{_`H{oeTdJs$_$Zdj&NLy5tbtvi@V5=f<5m6+5(&wfL3n9y4g;U9YB&^vgkrvO
+zXRzk#M#Fkev&sDi$j|qm!e}t(Q{ehODyPV#oKdq>n5i$P6hz*kh)?)!w(o~&A7uL+
+z&BDhImxW)e9>#|b<i`ulb;W#CT;}0C%}790SLjjMLxrU}$~dSLhfp;gqaUa`LG1bS
+zud0pk)O^jb!o}xAHA{iG4n1el?-kApc*Y|=IIMs6#A+3OO?FhPq~Y{V#jL==P5uUY
+z+;B1s_3Os#yrwoC*_#V3FH<Y2>l{XB5tIl71R$8`NiOV2udk~$NdPROEz7A~*ci>~
+zt!YEi+(j~th{2&~DXoK@bG`tB^H0s?twdVN`7uqJck8g>%a>M?%UAavsxAylU@-~L
+zR3J<OjU|77_1n%?u$o6=@fz%Hzh4X#k!pV!pIUhXs6F=nk&D?#&<_Iq=pln>`wqbC
+z%6VS5R^<att)9obN1r%Y40eoc1Q|T1)4k`Bz{r(q6`H<0dl2)3GAPEYss9&WJOm;T
+zZf0j|tE-z>VhjH>F3LV!Xlq`Qh||Tv>R2GmHn8GC>0!8?oq3yWNe!+R#%geNWyQKR
+zUpQ@MY#bZgmKY%DuD=Y*tgKjek}@|q6FN9FVm{v0+j|S_n{w5?$49s^A3Yw7YSRrE
+zH?Pr!$r@fL&1<?G25i2;A)wwPQu>W9Jva<;E2V5uU&zS@tkS+m1NfV`war+gAZN}D
+zDjyMPlM22h4|9lnoBb;SX8m)ZXNjVk)q*2{-AsrONJQQUL{g{IjrgoJo@Nkn#VX=Z
+z4p81~o+I;qtYPd{)nZKPc#~uI8oNH=Q60LNhF==!G6HH_iZI$Nj>(A=7ot&qFSH=F
+z(4l`Y)|=I*nmTNz#XR&vl<@g`Z7u62)v1HB9mlD~_Gd%~bw1&kDlIWv0|!vEGQK$7
+z;>I&({mNyyMZr||+wBjsOXtKvZj4okj9;H$?}5P-K_DK##gjWse35w~5dCvH3^bXt
+z&$3_{^&YVJXIUr%e-_S#jvfDLUu+n@_v_R;7QL#^|KC7e&afed%@S<f?Gdz4WokB&
+zipX?Hqg0X6w#g&th$I1c$Ulg)ksK;x#ZwXN>UhK!iiDA#!IDqsXDb+kNVBsu&?BLZ
+zz;lW*e*rwIGtpXez|g$sIobHcg7MPPm?%md+3*D0q-3x4cTgKhI`;w*zFQ_OTe&SX
+zr*te{5uM>&r`Uuc=Yve*%kMMXBDzYk^vw3L_(5=daM-j^e}H}NR6HsS%nB(1pX=JA
+z4xS_NIGmVDAS!uK)#-t3?hF(g_Jo@m#7fFoeMfM<!G79vMAK048VRM|+2XAVL9w@C
+z^@(9m_?c0HC-|TTp%*SX&9c0ylzHvAeOhd7T@9lL)EyU7oZ|xs8*Iy_5v{%rfQBmH
+z>Z0f`=fHe4dD~$Lme;ue#iRLF%#Ib6-}5x}tdAH#Y2{bOH2uV<#o`$!ikC*S0^L-K
+z1?X=~9UIv&qFRI-{glK3Pr#zII6r20_i5S>VyPT@wu<IwR;~`ABc^o_Jp!Csu3Qea
+zb|Jqi4W^+#AqVM^LY2sD{1hLdK?7eR;8qD`JF7B&D4C8%*?3M8VAbA9KH{=HU9UI)
+z1>TMHlMNDd^AJq5V(|6e2Vvnma?yMD`Q@S!z880q4HMW5c+vw*Gy%}(aBpe&x1NRr
+zCxp_8H?aTOoGNj33p@fhJ8C8fgy;W}!~6m`v(i{OzNt>Mt&J6AA<CL^^L>5sOlYfd
+zhjF!eU1B2cN<-iUF``9O*$VB9iZvg<tW--J?53T#6cKr8pb|Q5Y#tRg9-#aDmJG*&
+zv25?=NFP30Gac30F=md_g-uSrVS&179bRxX2%q*b32h;+r5^JxSZ)5e3|HtwcSR9N
+z$uib)ns<`UO#I-Zsyw@W8-HoD{5TpHnxjIQ&*gqb!G%!)cUPC>0@l$jY3IiVxaL3v
+z#{?3Oy2Z_N>`CcO91h_&A6$Ih{{?^<`JQE(IyP*(tBKLVH#!=RMXqEoJ!X&&jUKtH
+znXIj-_C@h?E@pJCE~l36q-sar{>W|T2)OEoJ&5<&RDS$g7}2glyi<@f8dFQONUxBU
+zXdamf;Y>!!PLAv{Qw|BXVaX~z@pGK-!?5Y6*alf_C*<234EUSl4D5H{Q2W<zjk*|(
+z#6pv%S7{k+JjuQ5vNwhm%F3O+7?!+uU8nJ{oCR1A?Dy5`*@J<&fw)B}@7B|onV;ED
+zP3`iB$r!pmogCwXO&(Qu(I4UARDR#fQ^7?_vvJmYN2Ktu%2!~njxHoSkDQ>0B!>z@
+zYp5<1c{Pr@+KJ1y!zBRfs1fHG#HTOzQAOEAw(l{6SPFTG15y9(r|xES-U}Ubbrbh^
+z7RP?L*uj=P7|r&Hk1H}r5}QIsztbN^np}$lG<A|>#*pnhWHGFD*OCDwd0;C|T`w3c
+zPbxh*lC4GlY_VlKqH+Bu@`24Gpuyp<EApV(pG7Q|X_7e%XA^OXt92d4%#ObPUBak-
+ztK-SUh$6>4sQ}P(7yPCp@KLc4>so5}sj5Su+ohW%<sF+sk(aLK7M!OFE|SWL>ttUC
+zRGkL!&`UNwR7B+a2P=|USV!kl(95~FU(YG&$_^ESMs>)Zt{?VXba<oo1l~J|JFExk
+zxUfUT8Pz)0#|f|+1j8eu)z`R9GWd|7I}~1r0VK&<VE~Ot<n)|ht(61UiU?&A)|po`
+z^@2ty{c5TMDaax4F*{GAZsW7Zy#0^H#af%oK_7gGn?O8?9!R$z+A5$RH)@KtX44O)
+zwC2Fnz3~!g4B220RQq|VmqwXpDgbp4-|_do4g25y4NaYu^9da}%D&n9(Ph88QQiM6
+z$ES|a_w7F)5f2dzNISpX-QWidsyWc9>vAc-LM2$OateGjECpcTHO)O?1eq%FzjQYX
+zfr#|-E%e0)!UchDZixRinURI~vuBxjCKFm`cJ6rTsSCN0RhMd8q3y=TcHPBI%@yIa
+zW#0HAd~knTM`vfOxo!8!E4dUPGb0077yRJi8tCIS#!;P7QL$sTiLqvvZ-s|t-hQv?
+z8|?kSyAEGJWB&73;%}v|b^Ez2$FpkxZ6{K-_Qa~1l{gC<f&;{-#N_wytt4Z?Nfj*e
+zeDDR|Bvj2>H<dpBn(F#Mi2hyAM0!{fnQVzrS#_A1on5sOoop`Ln(fNZw2qlvF#@RB
+zF|t)&aL@Bxi!<E-*0vEb8p4wBz-2GP^39j4odP6ZrZ7m>5~|SPaj^`sjruPjn)+J8
+z)!s9VV28{V<Yj-TQ@ne7-v^L@`}#yj7Fj1O3YRSa8p1tE^d^S4=lo6Y&B1p2_C9G%
+z%)xkSihdlHes!clS&^chs=05t`|or;y7`&;tw0k4A>xmA-{M#H+wX|wq)T|(J~aM(
+zKV$Rf23T`+o|o8r(@MQ-Pw$DhfMgFgWZGawHUD6cnY^FtrIfa*Ok(O&Q@FL545wRj
+zB%!yhm$RsD3J?lMNn7Qvp@Ib#6=4nZ=V|a9OVn^X%}3PLrNVwVk;5+|%PP|H3tXB}
+z*+eHFs<1Gt66l9BGCjF)>}nbGSXPhd*^+jg2*`6pt*66|fg+g+#=1EK!U?<6RSgnZ
+z77d7`H1^#Tdyhrw$*5S7H|m|B^z31f!r_$Df7$cVTZ4r&*>05hbLA{l*g$BSf&3pf
+zSZQyL*}0QB>wi1Ko}v=r>@o-83#vE9l1?~Rf-y-#i_+r+Bh{@LQF!Lwz`X)V^XZ!r
+z>&b@tg3WPmdleF7hY`nnBmcATpS4H>&ic&;vW4v;7|Vel+Wdp-3BR;kIZL$prP6%f
+zDw#P1YEa%3sp(C~mHkXoF;m56@6U@`5t|YK9q+NOVXcj@P>3q+``rPJ^^Ju7D+?YM
+z;GxZY#?DxcKgbbgsn9x&RUMPQ8atd*y>Kf;@m^SDzm=V~8J5UY^PqHGf)SNeK~qf>
+zUeK)%s2sqlFlM*Zpspz#jx2vYJZ`^I!GHw)CF2;ld~$P;#<OK*n(NACH?L{<;ki@F
+zZ|Hc(^;^(xl?3_(31t~U$r%X9w%G?Y@_x4reX!Bb3#}K$8oIyR)kb<VZB~$@5f@v{
+zL_6p&WMflD6i|(zY0j;hivLz%s*k%S!WWPC75I8ZBK{YDpqOffW@Kn+ITqE`3EU7T
+zD=zR)BhM=1M{TCT_4Q0RJTS7kd7V~UYgKC|&=ov7%AB~t3~aiV>mJ%))!vrj=jH7^
+z7JPF83Wd8(@x1`U{`vke%ctIJo`3#qY-T2VMb*0hyA_Ko@2MH%oXqwucdz)XX68s9
+zyk-+56*%4Lfkm{nB_uKWj~U0Nt)Nbpa9I!XH}y>go;nFJ4q-{2)i+>DDh>vT=C<%o
+z7`;O2$nAlg$y!?#)~PU?O=OIX()g6XUYFOEGFX&-JG=tU^RSiv%zcIHwO{ffqQy}N
+zN>sLlRh9zk;q|I^CaV|Z#Qx}>)X-#-+XL>DNUz{V0Yo|fGELO?-^UOiX|fU(fb70E
+zFzzVKQU9LB#F`UUmCF`n4B_8z?Rl0eZ`)Tar}egM{1h_Nf9jY;*IHM=OFxJM(QFx&
+z;?wVzEB^s~{xK6#pMqld;nnc0LgHX%Q_S$*1mynrF#5#%+i88(rY~<HSt0*+A36^1
+zo4fy1@AL`$xAl-Al^+CW&#e>iDCGkGPihY{&pJvr^sbka;qxMKE+O)GFxfK3c?6x#
+z9vQ}kncw$A8>xJR=%){)B_s?i#xsv(;Q=I^J&*ELYyX}K6uF8Sl46(S5bvd<8H=1p
+zIc8!oyO^#_$fHE~L@8dImnu0yj}b9bJb&O5IDsL;{!`o4DgNlI8Ogky`v(q4&S=lE
+zNj`FW%GpD9bBaOj{cR#IN7MPFVxnW=FcfZ${ozv=i&vK-mj7tYfyW?*`<1U&W|#7`
+z)R|eW^Q~~;%<j!%6%^ma5*rvb)$ihd-Vy|kx@=}3TgS$v@RP9+M$=_qytm%oRmb^S
+zTLxS#F<_GP7`bUDRjETjDj)J{#MD<u$J@#VgGt}i1i!pfPXbk&m3Zql6UCk+a#G0q
+zUUprla`Bo{ZPTrw2^jh-=`(x{6^0^DbB!}KS8Fj-u&V?isfoVeNck+wxdJznF`!Co
+z&d^;xUG0mtsFN`<T5<YswXGJdj)LJZPX!clET#>BJYacYt*>C)2Y(oGV5Fi+rTM1*
+zOxZ2-(*D<BinO0i+Ok6*gyD^jyLHOEyVJiQBa&ytLtUzqFpZT1lN^J@bvyQA;`hoq
+zH=bLtp&H9!H;2Zy%P9z?xyUB?zgDZ+Yc&Dvk<iSH(6A)L@jpg6i;_e`8fI2y6bUm(
+z>y-RPmR~sOqo<)!EvzbP%kVjO?mVLT0u?aK=#o|sbcFWwUhcb^i&%z~cJ$xwL^|Sw
+z#~Pb%WK(F^!H!OiDr^|(EOM2Xcd+77uiKL>>ZSJQbF^jQ;nb{V0Xm)%-A7~cXC0&y
+zW$@72AB*%*{z=d{I3G(Ah@O|^mNHWCK0!5T%`di24@V}STWZXD2Us}NJ)u}Jqjm=Q
+z-wHcv{@i0d5Z@jmG1K!)ahtp-(@gEu-$B`M2M@D0ZYxR)1?sNmb^Ztvg|s&<qeit~
+zOKYtQ(cE);i5{~2Z3<A@5gY5M|1qRgnN^&NeP$1VYuV*vu$D848^{{5a1BdNjf2X-
+zg!Mql3ut{(Ai{dxzG{-U@pal*X3JiqfD(J#p}xiNq!VQFx=1zOYRU2xL-!^F+0nz@
+zyHw#$C(?s{O`%m~)z<}()-{J;mX<m{c+co33Gt(>OC5Y*KQgf{<D+cvFUmXEgob}y
+z;fX_Abj1UIWj!JwINu1*021uYu-Muoh(Mvji2ic!Y($5g>;{ulNTfTj{8$=im4_h|
+zgq*W`FbELymAc8ycP$K+SZDfkYoJ!?B(X{?5iBEX(B%&^0;hZW1y+>`<lj~EtvO3*
+zP?tWW67oL8*qt`emRGWI_GK*SYAq70#Hmw8G>gnFxG=w-EgUE@trsA>rcv|3OXU_I
+zaYx~+K^3B6#!R2(SjQwrSL%4e4or0Fs--LA!?o;@75N~IXep^y`^Dn#M8FszmdIZP
+zlH4;Ahw2G#!X|97=H1tD`oIqha)C|_Q@h+lz<@Y(tRt$qrdMa<T9t`qOqqUhQ_~eE
+zh1c1Z%fPU9T#Pc*e2>RH46dX<PJwQvd-5-p_u)A>Yf;F>PV4!QCR5+NKX*X)Jt+3o
+zVv2Z7z@H-6dHGJ8h!Ne)q4akv5mZW~sMt$uHCBw-)N;5QUaXBu`ei4E9!i93q&iZV
+zQp{r|gZ6a-h_16dWC*D<Q7qBX-PP;`d~mw$tB<ytP>fofQ}RnDo{>yA{`_eNPwaSv
+zfvs3faTv2y3+|ZINs9TT?TQa7Bdnek4Bhcm+vqsqXBPZUB@w@cqhZn*+VwE%_AfZx
+zwNlW&(|1I|B{}m$9W`T!*Px&-@js`#JgVhdKugOb%<x#D-m-AVKQ1`S(mjl1n4yuG
+zQCpX(kzq(ey8l_VP{L{0x{R*O-uf#IBva9)$aMgzw_)gVS3^(tvKDcfCUUsGd*a^R
+z?piIp*Dcv;nsB7HJ~V^mfea0M@&0k7qwiDEx_1wIH4Kq7&2URg22i<a=828kT8eok
+za-35Dy(~Bnp+CPR2%<7iu;l%IIW!gRsVk1~8rLvvP>{~~(J?GMn871fp!qo)U*IA@
+zuth*H@=nNgEE(mS@2GW(d3|8iKd4&eVvGs_zJ-XPmd83MmNf_j#;Pe%r5Iq%#qvxX
+z{^MFyC^LMiaO?5iS~fK5ntN?Fl(jO!))za2TOaS{#CI`Eu}w8M?jWR>3TN+H4cry9
+zzE4leX&x250^NScSmfS-y;>LO+vpS{zeQ9VO&6@VILg^}*K9Ruu6ROi<D)aJjhPEP
+z<mMfEwvgcnAcy@d&^ijUHvRz3Fe)VNVJ44z#oVVkv5i;bwx3Keo6y=d0u-3M=+qt`
+zW?B4XzCT`=I`b@N|H?!2Guav62s|DlasHK$J-Cw!a^b(@8S1)o-BobaRTR%Of{GNH
+zgL>aBF~qIHOY97OxALL5WO<FY`-x;Dz~c7;1hnp&%Tdwwl_wfz_{>2-=e9H{e&ZrZ
+zvIJG@n5+hI98Z%Y1@IZ;<7y+a84TzjP=~0#Au>6R1}43^2)xXclu{JG-Y|+L;FNyr
+z6+us@ou!Wu)J80}Kvx%ZdjtG79S-!V4B%I(I^u_;#S3z=3P19ZE;YzRmR!4U7693Z
+zWRddj1%Ejk`tDy@BWm9RDu5QK4?&eRfnj+bG~b*LQ5C&u0(J@TCL03rV&z$y$?0Z9
+z1%0v!kUe5~;dTo*0cGjpR&mz|kDyEQnguQbQ;+ao*!3t7r)I`!1+YZ1WVjClV{fJT
+z=10HpwW#ADkW8X@ih?L4h#@X>49`Bc*wYiZEP;69ranI2RgbYbG3klKr4<jTCsqo%
+zr`RUx$|E~*Ac$qpv+b;*0#>RDsaqkrL1x?MY5B9cVz@Qz-@#Gy+8~f*=hYADY9|(c
+zLh5*DARrOON>SEM%8;nA9<Ak&majDU8?hy5$KSH#EQwTG%@12)xIyl=53#oDsM8}l
+zOfK#2SRvzTZ4&J0`e$35*nwoGxH{b#3zJDy{KcuBqgD!nI1{@4Kj+3-mQ<R1m=+dG
+z(jXzRTC}xmE&oIws9rQAN!gFqHZ;^9LzYHXw&eN<Gc}YXclXp^ZZ`}CmU&&e<c<yP
+zuJ0VVJkr~3h@~Z6l;-LU-|y^ce|Wzu(@<;K?K@di<nR+Ey~nk*kRL)nHg;tc(`K{6
+zNna*1!U{$jDWCF!Q21pB%SGZ#hAn9Kq100|@j4FVlg*sPMB(j|I>4gop2%l+iR6D*
+z1yrP(Rt*_x6&6no3ZK*PHRVU!@YYW$R!Kp^iMvStjcpy9pFrf{8u^yk&0kv?{Uei}
+z;lGOCOr8kW#G2bQ3c+f#YUtSf%-hSOjA9kdC88r2kXTk*Xq0|0PAI)d(?z_TvN=@x
+zgl~}<=Si9J7X7n3AoybN#HdQ51mEyQ_R?%s+*mG_kvtzIIFWaVmi=Vlxx>Z_k;(5K
+zwr>i|zuN*)!yEBiG&S3|?|RMqd9v87@Abz<N(>KncTiPFM|8TI07O>?=7%`^pjq8H
+zfUyus4)8zi=<Ci<&rBD5dPO!5xxQ7LuYi9X$O8DNj9}wwzm1@$8%qvFPkf7`f`~LJ
+zdZm6jgNIR15Tl};(|B0a>T<?GP375*mQImEiuOx$rfN`?G^<3V#V3HPf*y#EI;8u?
+zf#yQ_;4wI!Dn0twvv}_#u_lwxmyWqH@P2>H-wRc$u7F?SQN!YBuJC`oN!8CbjfV&*
+zoPns=p>}7VfOJ&k>SJ_;U`0TWoC)$sT))#h9x<dmJ7@YU65sG-j%q$_RsF$0JY}v@
+zw{Atgeu{~%7k0Z?=S*dhi^5!uA5}b9U)R7!YE2#m5#SjYc@C91P4H6bm@0st73$D^
+zfeZVZz{VUB1*zNF#cI~t4U9+Dx^iZ$FW@T0hc$MRw!cGe%3qMKovj3haD5(_T+DRt
+zm}jR=y6`=6PqL>NGj>_pkI|hMF@Z%FJuzuvrxC*7ZPRIq{-$IC2hsEB4x!PmkMjXy
+zH4oTcLw>swa0yI6RZ52Rs<<$9It;joIjhe40AVY@)`7+M^xQRp`PJ}Y17lYE4gWc|
+z(j&wGc{pN-&|$#JyDuBFas5M3earN`BPK-2=AR?MWpE2S;qsmm+B+M4|7q-(2iEBe
+z@VoRF)jU}`Ob)ahzM&_E(EWqIKG04N{Q@V>g>)dyqUP_bC-Pc3xGj%Wmp`M6TOp*x
+zy7fC-40;)yc2|Wm-MuqAl+$s1&;Mf>{{OiQU#o{Cg&7)>LXk*7(~|nX8B{oc&8!ku
+zWz=e9VoC0nUZ<LKsG6jjnvBej&hCy{lAh1Si)%@>y*<^PwbdCK8i3`sa+*hXPgk#0
+zINU?hMBL1#An&&g7kPMuh0gWg83xw<vkv)SsxwTfy5ST`c#@`jSZi~Ri@fEofYZLW
+zg3`Y&Uqu5;dr&ectZ1OhTP71^wWozvvOAM5V!8&fzPs-8N9V>7P;EFRVEx})eeG9j
+zIB{6(^9v8VOLtc&x81N@^dW%}oR#y+6bMZ?)x?^6b=Ez)Z6~{6Yd-q0z5TrYULWSY
+z_RY!cL6}t9yvAS&X;924+X*|e-A)soRw$w9h8Ur=(oJ^JtV|t$R2Vf5up}WWx<dn;
+zQ&pQId2SE-)(<5GyYn_g!6B$uNJf78O~*PUB48aQDs=Y`PyL-2?3Lv6Q81KOxe3Xm
+zkNv*oT9y~xC|k+Hs-na;>6Ou4#w&G`@3%01m|6u(<Br{C@b?k^xvCAZ`@LV6U^M?A
+zAkjO63Z1#QlxZAy4M2S*@SXy}sMZ)=h>5va?=^fePxH?eIM?@%Pza>V%?K&@RsQif
+zCqAi0U+=f1U&Bx(9)>o`E%!O>D|D{LyCo6&kN))}v<7c}FnLhJzK&8SSmT^5M)|Lb
+z0+#;Tuk6`KI4zD(3Df#J5^0lEVXEu)VE^n1V=S9lmPJ(N9`qewYy{0Y2Y4rWa`sE)
+zCTF>Unt8_x<%zeoCkiCPEX}py`dc{K2M*2*V$9?;l-dTAWim1;^D9$%;390)&eSS_
+zY3uNH-{>1SUoKUq_BmT?j_N88ZEA)t^Ry2bG;x~}djwnRbN8|l5b^JW2&^u<(_0<p
+zER>KYE{7!30<D4h4zW?I6w}c4kp~^MyfRO^xUCHazY})B3#e>s<6~eP(6_sLo7|_X
+z+<RPlb@5pssLyg{OEGQ=#Y^-&LfJ-qkX``0{;?)7t}SLR<ly2$7+@w@*$?m}82v)g
+z6&QaW=(j%V2@b@?2%%VdRQdu$_k+Ck3VJ(Ry5au<R{*I01q5)x7x$*D!^#%J!3WGM
+z$Q;1O?vBxEUM~c2^1UjvyzkI4A1pH%Hq*<M0R<2M2L)fipfUw82n~nEE380(3%nl3
+WLjhwDa5Nt)XpArc8a(S!AOJg<B}_j6
+
+literal 0
+HcmV?d00001
+
+diff --git a/odf/odf-web/src/main/webapp/index.html b/odf/odf-web/src/main/webapp/index.html
+new file mode 100755
+index 0000000..d177e35
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/index.html
+@@ -0,0 +1,14 @@
++<!doctype html>
++<html>
++<head>
++  <meta charset="utf-8">
++  <title>Open Discovery Framework</title>
++</head>
++<body>
++   <div id="odf-toplevel-div" class="container-fluid">
++     Loading...
++   </div>
++   <script type="text/javascript" src="odf-config.js"></script>
++   <script type="text/javascript" src="odf-web.js"></script>
++</body>
++</html>
+diff --git a/odf/odf-web/src/main/webapp/odf-config.js b/odf/odf-web/src/main/webapp/odf-config.js
+new file mode 100755
+index 0000000..6bb4a47
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/odf-config.js
+@@ -0,0 +1,15 @@
++/**
++ * 
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++const API_PATH = "odf/api/v1/";
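++
++// Usage sketch (hedged): odf-globals.js, which is not part of this hunk, is
++// expected to derive the endpoint URLs used throughout these scripts from this
++// constant, roughly along these lines (property names such as analysisUrl and
++// servicesUrl appear in the callers; the exact path suffixes are assumptions):
++//
++//   var ODFGlobals = {
++//     apiPrefix: API_PATH,
++//     analysisUrl: API_PATH + "analyses",
++//     servicesUrl: API_PATH + "services"
++//   };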
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js b/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js
+new file mode 100755
+index 0000000..67bb709
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js
+@@ -0,0 +1,473 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var bootstrap = require("bootstrap");
++
++var React = require("react");
++var ReactDOM = require("react-dom");
++var LinkedStateMixin = require('react-addons-linked-state-mixin');
++var ReactBootstrap = require("react-bootstrap");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var configurationStore = require("./odf-configuration-store.js");
++var metadataStore = require("./odf-utils.js").MetadataStore;
++var ODFGlobals = require("./odf-globals.js");
++
++var Button = ReactBootstrap.Button;
++var Row = ReactBootstrap.Row;
++var Col = ReactBootstrap.Col;
++var Table = ReactBootstrap.Table;
++var Modal = ReactBootstrap.Modal;
++var Input = ReactBootstrap.Input;
++var Alert = ReactBootstrap.Alert;
++var Panel = ReactBootstrap.Panel;
++var Label = ReactBootstrap.Label;
++var Image = ReactBootstrap.Image;
++
++var OdfAnalysisRequest = {
++	NewAnalysisRequestButton : React.createClass({
++
++		getInitialState : function(){
++			return {showAnalysisRequestDialog : false};
++		},
++
++		open : function(){
++			this.setState({showAnalysisRequestDialog: true});
++		},
++
++		onClose : function(){
++			this.setState({showAnalysisRequestDialog: false});
++			if(this.props.onClose){
++				this.props.onClose();
++			}
++		},
++
++		render : function() {
++			return (
++					<span>
++						<Button bsStyle={this.props.bsStyle} onClick={this.open}>Start analysis (service sequence)</Button>
++						<OdfAnalysisRequest.NewAnalysisRequestDialog show={this.state.showAnalysisRequestDialog} dataSetId={this.props.dataSetId} alertCallback={this.props.alertCallback} onClose={this.onClose}/>
++					</span>
++			);
++		}
++
++	}),
++
++	NewAnalysisRequestDialog : React.createClass({
++
++	  mixins : [AJAXCleanupMixin],
++
++	  getInitialState : function() {
++	    return ({config: null, discoveryServices: [], errorMessage: null, discoveryServiceSequence: []});
++	  },
++
++	  close : function() {
++		  this.clearDialogState();
++		  if(this.props.onClose){
++			  this.props.onClose();
++		  }
++	  },
++
++	  submitRequest : function() {
++		this.setState({requestInProgress : true});
++	    var dataSet = this.refs.inputDataSet.getValue();
++	    var discoveryServiceIDs = $.map(this.state.discoveryServiceSequence,
++	       function(dsreg) {
++	          return dsreg.id;
++	       }
++	    );
++
++	    var repositoryId = this.state.repositoryId;
++	    var metadataObjectRef = {
++	      repositoryId: repositoryId,
++	      id: dataSet
++	    };
++	    var analysisRequest = {
++	      dataSets: [metadataObjectRef],
++	      discoveryServiceSequence: discoveryServiceIDs
++	    };
++
++	    // now post request
++	    // clear alert
++	    if(this.props.alertCallback){
++	    	this.props.alertCallback({type: "", message: ""});
++	    }
++	    var req = $.ajax({
++	      url: ODFGlobals.analysisUrl,
++	      contentType: "application/json",
++	      dataType: 'json',
++	      type: 'POST',
++	      data: JSON.stringify(analysisRequest),
++	      success: function(analysisResponse) {
++	        if(!this.isMounted()){
++	        	return;
++	        }
++	    	if (analysisResponse.invalidRequest) {
++	          this.setState({errorMessage: analysisResponse.details, requestInProgress: false});
++	        } else {
++	          var msg = "Analysis request was started. ID: " + analysisResponse.id;
++	          if(this.props.alertCallback){
++	      	    this.props.alertCallback({type: "success", message: msg});
++	          }
++	      	  this.close();
++	        }
++	      }.bind(this),
++	      error: function(xhr, status, err) {
++	        var msg = "Error while reading ODF services: " + err.toString();
++	        this.setState({errorMessage: msg, requestInProgress: false});
++	      }.bind(this)
++	    });
++
++	    this.storeAbort(req.abort);
++	  },
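++
++	  // For reference, the body POSTed to ODFGlobals.analysisUrl above has this
++	  // shape (field names are taken from the code; the values are illustrative):
++	  //
++	  //   {
++	  //     "dataSets": [ { "repositoryId": "<repo-id>", "id": "<data-set-id>" } ],
++	  //     "discoveryServiceSequence": ["service-id-1", "service-id-2"]
++	  //   }
++	  //
++	  // The server either answers with { "invalidRequest": true, "details": "..." }
++	  // or with an object carrying the id of the new request, as handled in the
++	  // success callback above.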
++
++	  componentDidMount : function() {
++		  this.loadDiscoveryServices();
++	  },
++
++	  loadDiscoveryServices : function() {
++	    var req = configurationStore.readConfig(
++	      function(config) {
++	    	if(!this.isMounted()){
++	        	return;
++	        }
++	        this.setState({config: config});
++	        // clear alert
++	        if(this.props.alertCallback){
++	        	this.props.alertCallback({type: "", message: ""});
++	        }
++	        var req2 = $.ajax({
++	          url: ODFGlobals.servicesUrl,
++	          dataType: 'json',
++	          type: 'GET',
++	          success: function(data) {
++	        	if(!this.isMounted()){
++	  	        	return;
++	  	        }
++	            this.setState({discoveryServices: data});
++	          }.bind(this),
++	          error: function(xhr, status, err) {
++	            var msg = "Error while reading ODF services: " + err.toString();
++	            if(this.props.alertCallback){
++	        	    this.props.alertCallback({type: "danger", message: msg});
++	            }
++	         }.bind(this)
++	        });
++	        this.storeAbort(req2.abort);
++	      }.bind(this),
++	      this.props.alertCallback
++	    );
++
++	    this.storeAbort(req.abort);
++	  },
++
++	  getDiscoveryServiceFromId : function(id) {
++	      var servicesWithSameId = this.state.discoveryServices.filter(
++	         function(dsreg) {
++	             return dsreg.id == id;
++	         }
++	      );
++	      if (servicesWithSameId.length > 0) {
++	        return servicesWithSameId[0];
++	      }
++	      return null;
++	  },
++
++	  processDiscoveryServiceSelection : function() {
++	      var selection = this.refs.inputAvailableDiscoveryServices.getValue();
++	      var dsreg = this.getDiscoveryServiceFromId(selection);
++	      if (dsreg) {
++	        var newSequence = this.state.discoveryServiceSequence.slice();
++	        newSequence.push(dsreg);
++	        this.setState({discoveryServiceSequence: newSequence});
++	      }
++	  },
++
++	  clearDialogState : function() {
++	      this.setState({discoveryServiceSequence: [], requestInProgress : false, });
++	  },
++
++	  render : function() {
++	     var alert = null;
++	     if (this.state.errorMessage) {
++	        alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
++	     }
++	     var servicesOptions = $.map(
++	            this.state.discoveryServices,
++	            function(dsreg) {
++	              return (<option key={dsreg.id} value={dsreg.id}>{dsreg.name}</option>);
++	            }.bind(this)
++	        );
++
++	     var discoveryServiceSequenceComponents = $.map(this.state.discoveryServiceSequence,
++	         function(dsreg) {
++	            return <li key={dsreg.id}>{dsreg.name} ({dsreg.id})</li>
++	         }
++	     );
++
++	     var waitingContainer = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
++	     if(!this.state.requestInProgress){
++	    	 waitingContainer = null;
++	     }
++
++	     return (
++	       <Modal show={this.props.show} onHide={this.close}>
++	         <Modal.Header closeButton>
++	            <Modal.Title>Start analysis (specify service sequence)</Modal.Title>
++	         </Modal.Header>
++	         <Modal.Body>
++	         	{waitingContainer}
++	            {alert}
++	            <Input type="text" ref="inputDataSet" label="Data Set" value={this.props.dataSetId} readOnly={this.props.dataSetId}></Input>
++	            <hr/>
++	            Select a service from the "Available Services"
++	            dropdown to append it to the sequence. Repeat selection to run multiple services for the data set.
++	            <Input type="select" onChange={this.processDiscoveryServiceSelection} ref="inputAvailableDiscoveryServices" label="Available Services">
++	              <option key="emptySelection">&lt;Select a service...&gt;</option>
++	              {servicesOptions}
++	            </Input>
++	            <strong>Service Sequence</strong>
++	            <ol>{discoveryServiceSequenceComponents}</ol>
++	            <hr />
++	            <Button bsStyle="warning" onClick={this.clearDialogState}>Clear Sequence</Button>
++	        </Modal.Body>
++	        <Modal.Footer>
++	        <Button onClick={this.submitRequest} bsStyle="primary">Submit</Button>
++	        <Button onClick={this.close} >Cancel</Button>
++	        </Modal.Footer>
++	       </Modal>
++	     );
++	  }
++
++	}),
++
++	NewCreateAnnotationsButton : React.createClass({
++
++		getInitialState : function(){
++			return {showCreateAnnotationsDialog : false};
++		},
++
++		open : function(){
++			this.setState({showCreateAnnotationsDialog: true});
++		},
++
++		onClose : function(){
++			this.setState({showCreateAnnotationsDialog: false});
++			if(this.props.onClose){
++				this.props.onClose();
++			}
++		},
++
++		render : function() {
++			return (
++					<span>
++						<Button bsStyle={this.props.bsStyle} onClick={this.open}>Start analysis (annotation types)</Button>
++						<OdfAnalysisRequest.NewCreateAnnotationsDialog show={this.state.showCreateAnnotationsDialog} dataSetId={this.props.dataSetId} alertCallback={this.props.alertCallback} onClose={this.onClose}/>
++					</span>
++			);
++		}
++
++	}),
++
++	NewCreateAnnotationsDialog : React.createClass({
++
++		  mixins : [AJAXCleanupMixin],
++
++		  getInitialState : function() {
++		    return ({config: null, annotationTypes: [], errorMessage: null, analysisTypeSelection: []});
++		  },
++
++		  close : function() {
++			  this.clearDialogState();
++			  if(this.props.onClose){
++				  this.props.onClose();
++			  }
++		  },
++
++		  submitRequest : function() {
++			this.setState({requestInProgress : true});
++		    var dataSet = this.refs.inputDataSet.getValue();
++		    var annotationTypeIDs = $.map(this.state.analysisTypeSelection,
++		       function(annotationTypeId) {
++		          return annotationTypeId;
++		       }
++		    );
++
++		    var repositoryId = this.state.repositoryId;
++		    var metadataObjectRef = {
++		      repositoryId: repositoryId,
++		      id: dataSet
++		    };
++		    var analysisRequest = {
++		      dataSets: [metadataObjectRef],
++		      annotationTypes: annotationTypeIDs
++		    };
++
++		    // now post request
++		    // clear alert
++		    if(this.props.alertCallback){
++		    	this.props.alertCallback({type: "", message: ""});
++		    }
++		    var req = $.ajax({
++		      url: ODFGlobals.analysisUrl,
++		      contentType: "application/json",
++		      dataType: 'json',
++		      type: 'POST',
++		      data: JSON.stringify(analysisRequest),
++		      success: function(analysisResponse) {
++		        if(!this.isMounted()){
++		        	return;
++		        }
++		    	if (analysisResponse.invalidRequest) {
++		          this.setState({errorMessage: analysisResponse.details, requestInProgress: false});
++		        } else {
++		          var msg = "Analysis request was started. ID: " + analysisResponse.id;
++		          if(this.props.alertCallback){
++		      	    this.props.alertCallback({type: "success", message: msg});
++		          }
++		      	  this.close();
++		        }
++		      }.bind(this),
++		      error: function(xhr, status, err) {
++		        var msg = "Error starting discovery request: " + err.toString();
++		        this.setState({errorMessage: msg, requestInProgress: false});
++		      }.bind(this)
++		    });
++
++		    this.storeAbort(req.abort);
++		  },
++
++		  componentDidMount : function() {
++			  this.loadAnnotationTypes();
++		  },
++
++		  loadAnnotationTypes : function() {
++		    var req = configurationStore.readConfig(
++		      function(config) {
++		    	if(!this.isMounted()){
++		        	return;
++		        }
++		        this.setState({config: config});
++		        // clear alert
++		        if(this.props.alertCallback){
++		        	this.props.alertCallback({type: "", message: ""});
++		        }
++		        var req2 = $.ajax({
++		          url: ODFGlobals.servicesUrl,
++		          dataType: 'json',
++		          type: 'GET',
++		          success: function(data) {
++		        	if(!this.isMounted()){
++		  	        	return;
++		  	        }
++		            var ids = [];
++		            $.each(data, function(key, dsreg){
++			            $.each(dsreg.resultingAnnotationTypes, function(key, annotationTypeId){
++			            	if($.inArray(annotationTypeId,ids) == -1){
++				            	ids.push(annotationTypeId);
++			            	};
++			            });
++		            });
++		            this.setState({annotationTypes: ids});
++		          }.bind(this),
++		          error: function(xhr, status, err) {
++		            var msg = "Error while reading ODF services: " + err.toString();
++		            if(this.props.alertCallback){
++		        	    this.props.alertCallback({type: "danger", message: msg});
++		            }
++		         }.bind(this)
++		        });
++		        this.storeAbort(req2.abort);
++		      }.bind(this),
++		      this.props.alertCallback
++		    );
++			 metadataStore.getProperties(
++					 function(data) {
++					     this.setState({repositoryId: data.STORE_PROPERTY_ID});
++					 }.bind(this)
++			 );
++		    this.storeAbort(req.abort);
++		  },
++
++		  processAnalysisTypeSelection : function() {
++		      var selection = this.refs.inputAvailableAnnotationTypes.getValue();
++		      if (selection) {
++		        var newSelection = this.state.analysisTypeSelection.slice();
++		        newSelection.push(selection);
++		        this.setState({analysisTypeSelection: newSelection});
++		      }
++		  },
++
++		  clearDialogState : function() {
++		      this.setState({analysisTypeSelection: [], requestInProgress : false, });
++		  },
++
++		  render : function() {
++		     var alert = null;
++		     if (this.state.errorMessage) {
++		        alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
++		     }
++		     var analysisTypeOptions = $.map(
++			            this.state.annotationTypes,
++			            function(annotationTypeId) {
++			              return (<option key={annotationTypeId} value={annotationTypeId}>{annotationTypeId}</option>);
++			            }.bind(this)
++			        );
++
++		     var analysisTypeSelectionComponents = $.map(this.state.analysisTypeSelection,
++		         function(annotationTypeId) {
++		            return <li key={annotationTypeId}>{annotationTypeId}</li>
++		         }
++		     );
++
++		     var waitingContainer = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
++		     if(!this.state.requestInProgress){
++		    	 waitingContainer = null;
++		     }
++
++		     return (
++		       <Modal show={this.props.show} onHide={this.close}>
++		         <Modal.Header closeButton>
++		            <Modal.Title>Start analysis (specify annotation types)</Modal.Title>
++		         </Modal.Header>
++		         <Modal.Body>
++		         	{waitingContainer}
++		            {alert}
++		            <Input type="text" ref="inputDataSet" label="Data Set" value={this.props.dataSetId} readOnly={this.props.dataSetId}></Input>
++		            <hr/>
++		            Select an annotation type from the "Available Annotation Types"
++		            dropdown to append it to the list. Repeat selection to create multiple annotation types for the data set.
++		            <Input type="select" onChange={this.processAnalysisTypeSelection} ref="inputAvailableAnnotationTypes" label="Available Annotation Types">
++		              <option key="emptySelection">&lt;Select an annotation type...&gt;</option>
++		              {analysisTypeOptions}
++		            </Input>
++		            <strong>Selected Annotation Types</strong>
++		            <ol>{analysisTypeSelectionComponents}</ol>
++		            <hr />
++		            <Button bsStyle="warning" onClick={this.clearDialogState}>Clear Selection</Button>
++		        </Modal.Body>
++		        <Modal.Footer>
++		        <Button onClick={this.submitRequest} bsStyle="primary">Submit</Button>
++		        <Button onClick={this.close} >Cancel</Button>
++		        </Modal.Footer>
++		       </Modal>
++		     );
++		  }
++
++		})
++}
++
++
++module.exports = OdfAnalysisRequest;
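++
++// Minimal usage sketch (assumption: a parent component supplies the data set id
++// and an alert handler; the buttons manage their own dialog visibility):
++//
++//   var OdfAnalysisRequest = require("./odf-analysis-request.js");
++//   var StartAnalysisButton = OdfAnalysisRequest.NewAnalysisRequestButton;
++//
++//   <StartAnalysisButton bsStyle="primary"
++//                        dataSetId={asset.reference.id}
++//                        alertCallback={this.handleAlert} />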
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-client.js b/odf/odf-web/src/main/webapp/scripts/odf-client.js
+new file mode 100755
+index 0000000..de64367
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-client.js
+@@ -0,0 +1,1087 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++require("bootstrap/dist/css/bootstrap.min.css");
++
++var $ = require("jquery");
++var bootstrap = require("bootstrap");
++
++var React = require("react");
++var ReactDOM = require("react-dom");
++var LinkedStateMixin = require('react-addons-linked-state-mixin');
++var ReactBootstrap = require("react-bootstrap");
++
++var Nav = ReactBootstrap.Nav;
++var NavItem = ReactBootstrap.NavItem;
++var Navbar = ReactBootstrap.Navbar;
++var NavDropdown = ReactBootstrap.NavDropdown;
++var Button = ReactBootstrap.Button;
++var Grid = ReactBootstrap.Grid;
++var Row = ReactBootstrap.Row;
++var Col = ReactBootstrap.Col;
++var Table = ReactBootstrap.Table;
++var Modal = ReactBootstrap.Modal;
++var Alert = ReactBootstrap.Alert;
++var Panel = ReactBootstrap.Panel;
++var Label = ReactBootstrap.Label;
++var Input = ReactBootstrap.Input;
++var Jumbotron = ReactBootstrap.Jumbotron;
++var Image = ReactBootstrap.Image;
++var Dropdown = ReactBootstrap.Dropdown;
++var DropdownButton = ReactBootstrap.DropdownButton;
++var CustomMenu = ReactBootstrap.CustomMenu;
++var MenuItem = ReactBootstrap.MenuItem;
++var Tooltip = ReactBootstrap.Tooltip;
++var OverlayTrigger = ReactBootstrap.OverlayTrigger;
++var Glyphicon = ReactBootstrap.Glyphicon;
++
++var ODFGlobals = require("./odf-globals.js");
++var OdfAnalysisRequest = require("./odf-analysis-request.js");
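++// NB: despite its name, the variable below refers to the annotation-type button,
++// i.e. this page starts analyses via annotation types, not service sequences.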
++var NewAnalysisRequestButton = OdfAnalysisRequest.NewCreateAnnotationsButton;
++var ODFBrowser = require("./odf-metadata-browser.js");
++var Utils = require("./odf-utils.js");
++var AtlasHelper = Utils.AtlasHelper;
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var UISpec = require("./odf-ui-spec.js");
++
++
++var knownAnnotations = {
++	"Default": [{value : "annotationType", style : "primary", label: "Unknown"}],
++	"ColumnAnalysisColumnAnnotation" : [{value: "jsonProperties.inferredDataClass.className", style: "danger" , label: "Class name"}, {value: "jsonProperties.inferredDataType.type", style: "info", label :"Datatype"}],
++	"DataQualityColumnAnnotation": [{style: "warning", value: "jsonProperties.qualityScore" , label: "Data quality score"}],
++    "MatcherAnnotation": [{style: "success", value: "jsonProperties.termAssignments", label: "Matching terms"}]
++};
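++
++// Each entry above maps an annotationType to the badges rendered for it; the
++// "value" strings are dotted paths that ODFAnnotationMarker below resolves via
++// ODFGlobals.getPathValue. For "jsonProperties.qualityScore", for example, a
++// matching annotation would look roughly like this (illustrative sketch):
++//
++//   { "annotationType": "DataQualityColumnAnnotation",
++//     "jsonProperties": { "qualityScore": 0.87 } }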
++
++////////////////////////////////////////////////////////////////
++// toplevel navigation bar
++
++const constants_ODFNavBar = {
++  odfDataLakePage: "navKeyDataLakePage",
++  odfTermPage: "navKeyTermPage"
++}
++
++var ODFNavBar = React.createClass({
++   render: function() {
++       return (
++         <Navbar inverse>
++           <Navbar.Header>
++             <Navbar.Brand>
++               <b>Shop for Data Application, powered by Open Discovery Framework</b>
++             </Navbar.Brand>
++             <Navbar.Toggle />
++           </Navbar.Header>
++           <Navbar.Collapse>
++             <Nav pullRight activeKey={this.props.activeKey} onSelect={this.props.selectCallback}>
++               <NavItem eventKey={constants_ODFNavBar.odfDataLakePage} href="#">Data Lake Browser</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.odfTermPage} href="#">Glossary</NavItem>
++             </Nav>
++           </Navbar.Collapse>
++         </Navbar>
++       );
++   }
++});
++
++var ODFAnnotationLegend = React.createClass({
++
++	render : function(){
++		var items = [];
++		$.each(knownAnnotations, function(key, val){
++			$.each(val, function(key2, item){
++				items.push(<Label key={key + "_" + key2} bsStyle={item.style}>{item.label}</Label>);
++			});
++		});
++
++		return <div>{items}</div>;
++	}
++
++});
++
++var ODFAnnotationMarker = React.createClass({
++
++	render : function(){
++		var annotationKey = "Default";
++		var annotationLabels = [];
++		if(this.props.annotation && knownAnnotations[this.props.annotation.annotationType]){
++			annotationKey = this.props.annotation.annotationType;
++			var tooltip = <Tooltip id={this.props.annotation.annotationType}>{this.props.annotation.annotationType}<br/>{this.props.annotation.summary}</Tooltip>
++			$.each(knownAnnotations[annotationKey], function(key, val){
++				var style = val.style;
++				var value = ODFGlobals.getPathValue(this.props.annotation, val.value);
++				if (annotationKey === "MatcherAnnotation") {
++					value = value[0].matchingString; // if no abbreviation matches this will be the term; ideally it should be based on the OMBusinessTerm reference
++				}
++				else if(value && !isNaN(value)){
++					value = Math.round(value*100) + " %";
++				}
++				annotationLabels.push(<OverlayTrigger key={key} placement="top" overlay={tooltip}><Label style={{margin: "5px"}} bsStyle={style}>{value}</Label></OverlayTrigger>);
++			}.bind(this));
++		}else{
++			var tooltip = <Tooltip id={this.props.annotation.annotationType}>{this.props.annotation.annotationType}<br/>{this.props.annotation.summary}</Tooltip>
++			annotationLabels.push(<OverlayTrigger key="unknownAnnotation" placement="top" overlay={tooltip}><Label style={{margin: "5px"}} bsStyle={knownAnnotations[annotationKey][0].style}>{this.props.annotation.annotationType}</Label></OverlayTrigger>);
++		}
++
++		return <div style={this.props.style}>{annotationLabels}</div>;
++	}
++});
++
++
++var AnnotationsColumn = React.createClass({
++	mixins : [AJAXCleanupMixin],
++
++	getInitialState : function(){
++		return {annotations: []};
++	},
++
++	componentDidMount : function() {
++		if(this.props.annotations){
++			this.setState({loadedAnnotations : this.props.annotations});
++			return;
++		}
++
++		if(this.props.annotationReferences){
++			this.loadColumnAnnotations(this.props.annotationReferences);
++		}
++	},
++
++	componentWillReceiveProps : function(nextProps){
++		if(!this.isMounted()){
++			return;
++		}
++
++		if(nextProps.annotations){
++			this.setState({loadedAnnotations : nextProps.annotations});
++			return;
++		}
++	},
++
++	render : function(){
++		if(this.state){
++			var annotations = this.state.loadedAnnotations;
++			if(!annotations || annotations.length > 0 && annotations[0].repositoryId){
++				return <noscript/>;
++			}
++
++			var processedTypes = [];
++			var colAnnotations = [];
++			$.each(annotations, function(key, val){
++				if(processedTypes.indexOf(val.annotationType) == -1){
++					processedTypes.push(val.annotationType);
++					var style = {float: "left"};
++					if(key % 6 == 0){
++						style = {clear: "both"};
++					}
++
++					var summary = (val.summary ? val.summary : "");
++					colAnnotations.push(<ODFAnnotationMarker style={style} key={key} annotation={val}/>);
++				}
++			});
++
++			return <div>{colAnnotations}</div>;
++		}
++		return <noscript/>;
++	}
++
++});
++
++var QualityScoreFilter = React.createClass({
++
++	getInitialState : function(){
++		return {key: "All", val : "0", showMenu : false};
++	},
++
++	onSelect : function(obj, key){
++
++		if(obj.target.tagName != "INPUT"){
++			this.setState({key: key});
++			var equation = "All";
++			if(key != "All"){
++				if(this.refs.numberInput.getValue().trim() == ""){
++					return;
++				}
++				equation = key + this.refs.numberInput.getValue();
++			}
++			this.props.onFilter(equation);
++		}
++	},
++
++	textChange : function(event){
++		var equation = "All";
++		if(this.state.key != "All"){
++			if(this.refs.numberInput.getValue().trim() == ""){
++				return;
++			}
++			equation = this.state.key + this.refs.numberInput.getValue();
++		}
++		this.props.onFilter(equation);
++	},
++
++	render : function(){
++		var items = [];
++		var values = ["<", "<=", "==", ">=", ">", "!=", "All"];
++		$.each(values, function(key, val){
++			items.push(<MenuItem onSelect={this.onSelect} id={val} key={key} eventKey={val}>{val}</MenuItem>)
++		}.bind(this));
++
++		var menu = <div bsRole="menu" className={"dropdown-menu"}>
++			<h5 style={{float: "left", marginLeft: "15px"}}><Label ref="typeLabel">{this.state.key}</Label></h5>
++			<Input style={{width: "100px"}} ref="numberInput" onChange={this.textChange} type="number" defaultValue="1"/>
++			{items}
++		</div>;
++
++		return <div style={this.props.style}  >
++			<Dropdown id="quality score select" onSelect={this.onSelect} open={this.state.showMenu} onToggle={function(){}}>
++				<Button bsRole="toggle" onClick={function(){this.setState({showMenu: !this.state.showMenu})}.bind(this)}>Qualityscore filter</Button>
++				{menu}
++			</Dropdown>
++		</div>;
++	}
++});
++
++var DataClassFilter = React.createClass({
++
++	defaultClasses : ["US Zip", "Credit Card"],
++
++	render : function(){
++		var items = [];
++		var classes = (this.props.dataClasses ? this.props.dataClasses.slice() : this.defaultClasses);
++		classes.push("All");
++		$.each(classes, function(key, val){
++			items.push(<MenuItem id={val} key={key} eventKey={val}>{val}</MenuItem>)
++		});
++
++		return <div style={this.props.style}>
++			<DropdownButton id="Data class filter" onSelect={function(obj, key){this.props.onFilter(key)}.bind(this)} title="Data Class filter">
++				{items}
++			</DropdownButton>
++		</div>;
++	}
++});
++
++
++var FilterMenu = React.createClass({
++
++	getInitialState : function(){
++		return {showMenu : false, dataClassFilter: "All", qualityScoreFilter: "All"};
++	},
++
++	onQualityScoreFilter: function(param){
++		this.setState({qualityScoreFilter: param});
++		if(this.props.onFilter){
++			this.props.onFilter({dataClassFilter: this.state.dataClassFilter, qualityScoreFilter: param});
++		}
++	},
++
++	onDataClassFilter : function(param){
++		this.setState({dataClassFilter: param});
++		if(this.props.onFilter){
++			this.props.onFilter({dataClassFilter: param, qualityScoreFilter: this.state.qualityScoreFilter});
++		}
++	},
++
++	render : function(){
++		var menu = <div bsRole="menu" className={"dropdown-menu"}>
++			<QualityScoreFilter onFilter={this.onQualityScoreFilter}/>
++			<br />
++			<DataClassFilter dataClasses={this.props.dataClasses} onFilter={this.onDataClassFilter}  />
++		</div>;
++
++		return <div style={this.props.style}  >
++			<Dropdown id="filter menu" open={this.state.showMenu} onToggle={function(){}}>
++				<Button bsRole="toggle" onClick={function(){this.setState({showMenu: !this.state.showMenu})}.bind(this)}>Filter annotations</Button>
++				{menu}
++			</Dropdown>
++		</div>;
++	}
++
++});
++
++
++var SelectCheckbox = React.createClass({
++
++	getInitialState : function(){
++		return {selected : this.props.asset.isSelected};
++	},
++
++	componentWillReceiveProps : function(nextProps){
++		if(!this.isMounted()){
++			return;
++		}
++		if(nextProps.asset.reference.id != this.props.asset.reference.id){
++			this.setState({selected : nextProps.asset.isSelected});
++		}
++	},
++
++	onChange : function(selected){
++		if(this.props.onChange){
++			this.props.onChange(selected);
++		}
++		this.setState({selected : selected});
++	},
++
++	render : function(){
++		return <div><Input style={{marginTop: "-6px"}} type="checkbox" label=" " checked={this.state.selected} onChange={function(e){
++			this.onChange($(e.target).prop("checked"));
++		}.bind(this)}/></div>;
++	}
++
++});
++
++var ODFDataLakePage = React.createClass({
++
++	columnAnnotations : {},
++
++	getInitialState : function(){
++		return {
++			ajaxAborts : [],
++			sourceLoading: false,
++			columns: [],
++			dataClasses: [],
++			qualityScoreFilter: "All",
++			dataClassFilter: "All",
++			importFeedback: {msg: null, style: "primary"}
++		};
++	},
++
++	componentDidMount : function() {
++		this.loadSources();
++	},
++
++	loadSources : function(){
++		this.searchAtlasMetadata("from RelationalDataSet", function(data){
++			 $.each(data, function(key, source){
++				 source.isSelected = false;
++			  });
++			this.setState({filteredSources: data, sources: data});
++		}.bind(this));
++	},
++
++	searchAtlasMetadata : function(query, successCallback, errorCallback) {
++		var url = ODFGlobals.metadataUrl + "/search?" + $.param({query: query});
++		$.ajax({
++			url: url,
++			dataType: 'json',
++			type: 'GET',
++			success: function(data) {
++				successCallback(data);
++			},
++			error: function(xhr, status, err) {
++				console.error(url, status, err.toString());
++				var msg = "Error while loading metadata: " + err.toString();
++				if(errorCallback){
++					errorCallback(msg);
++				}
++			}
++		});
++	 },
++
++	load : function(assetRef){
++		$.each(this.state.ajaxAborts, function(key, abort){
++			if(abort && abort.call){
++				abort.call();
++			}
++		});
++		this.setState({ajaxAborts : []});
++
++		var req = AtlasHelper.loadAtlasAsset(assetRef, function(data){
++			var source = data;
++			var refresh = false;
++			if(this.state == null || this.state.selectedTable == null || this.state.selectedTable.reference.id != source.reference.id){
++				console.log("set state source " + new Date().toLocaleString());
++				this.setState({selectedTable: source});
++				if(source.annotations == null){
++					source.annotations = [];
++				}
++				if(source.columns == null){
++					source.columns = [];
++				}
++			}else{
++				source.annotations = this.state.selectedTable.annotations;
++				refresh = true;
++			}
++
++			this.loadSourceAnnotations(source, refresh);
++			this.loadColumns(source, refresh);
++		}.bind(this), function(){
++
++		});
++	},
++
++	loadSourceAnnotations : function(source, refresh){
++		if(!refresh || !source.loadedAnnotations){
++			source.loadedAnnotations = [];
++		}
++        var reqs = AtlasHelper.loadMostRecentAnnotations(source.reference, function(annotationList){
++            if (refresh) {
++            	var newAnnotations = [];
++            	if(source.loadedAnnotations.length > 0){
++            		$.each(annotationList, function(key, val){
++            			if(!this.atlasAssetArrayContains(source.loadedAnnotations, val)){
++            				newAnnotations.push(val);
++            			}
++            		}.bind(this));
++            	}else{
++            		newAnnotations = annotationList;
++            	}
++                source.loadedAnnotations = newAnnotations;
++            }else{
++            	source.loadedAnnotations = annotationList;
++            }
++            console.log("set state source anns " + new Date().toLocaleString());
++            this.setState({selectedTable: source});
++        }.bind(this), function(){
++
++        });
++
++        var ajaxAborts = [];
++		$.each(reqs, function(key, req){
++			ajaxAborts.push(req.abort);
++		}.bind(this))
++		this.setState({ajaxAborts : ajaxAborts});
++	},
++
++	atlasAssetArrayContains : function(array, obj){
++		for(var no = 0; no < array.length; no++){
++			var val = array[no];
++			if(val && val.reference && obj && obj.reference && val.reference.id == obj.reference.id){
++				return true;
++			}
++		}
++		return false;
++	},
++
++	loadColumns : function(dataSet, refresh){
++		var columns = [];
++		if(refresh){
++			columns = this.state.columns;
++		}
++		var reqs = AtlasHelper.loadRelationalDataSet(dataSet, function(result){
++			var foundAnnotations = false;
++			if(!refresh){
++				$.each(result, function(key, col){
++					if(col.annotations && col.annotations.length > 0){
++						foundAnnotations = true;
++					}
++					if(col.isSelected == null || col.isSelected == undefined){
++						col.isSelected = false;
++					}
++					columns.push(col);
++				});
++			}else{
++				//if result size is different, reset completely
++				if(result.length != columns.length){
++					columns = [];
++				}
++				//if the old array contains any column that is not in the new columns, reset completely
++				$.each(columns, function(key, col){
++					if(!this.atlasAssetArrayContains(result, col)){
++						columns = [];
++					}
++				}.bind(this));
++				$.each(result, function(key, col){
++					//only add new columns
++					if(!this.atlasAssetArrayContains(columns, col)){
++						columns.push(col);
++					}
++					if(col.annotations && col.annotations.length > 0){
++						for(var no = 0; no < columns.length; no++){
++							if(columns[no] == null || columns[no] == undefined){
++								col.isSelected = false;
++							}
++							if(columns[no].reference.id == col.reference.id){
++								columns[no].annotations = col.annotations;
++								break;
++							}
++						}
++						foundAnnotations = true;
++					}
++				}.bind(this));
++			}
++
++			if(!foundAnnotations){
++				if(!Utils.arraysEqual(this.state.columns, columns)){
++					console.log("set state columns " + new Date().toLocaleString());
++					this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns});
++				}else{
++					console.log("columns same, no annotations, dont update");
++				}
++			}else{
++				this.loadColumnAnnotations(columns, refresh);
++			}
++		}.bind(this), function(){
++
++		});
++
++        var ajaxAborts = [];
++		$.each(reqs, function(key, req){
++			ajaxAborts.push(req.abort);
++		}.bind(this))
++		this.setState({ajaxAborts : ajaxAborts});
++	},
++
++	loadColumnAnnotations : function(columns, refresh){
++		var annotationRefs = [];
++		$.each(columns, function(key, col){
++			if(!refresh || !col.loadedAnnotations){
++				col.loadedAnnotations = [];
++			}
++		});
++
++		var requests = [];
++		var annotationsChanged = false;
++		var dataClasses = [];
++		$.each(columns, function(key, column){
++			var req = AtlasHelper.loadMostRecentAnnotations(column.reference, function(annotations){
++				$.each(annotations, function(key, annotation){
++					if(!this.atlasAssetArrayContains(column.loadedAnnotations, annotation)){
++						annotationsChanged = true;
++						column.loadedAnnotations.push(annotation);
++					}
++					if(annotation &&
++							annotation.inferredDataClass && dataClasses.indexOf(annotation.inferredDataClass.className) == -1){
++						dataClasses.push(annotation.inferredDataClass.className);
++					}
++				}.bind(this));
++			}.bind(this));
++			requests.push(req);
++		}.bind(this));
++
++		$.when.apply(undefined, requests).done(function(){
++			if(annotationsChanged){
++				console.log("set state column anns " + new Date().toLocaleString());
++				this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns, dataClasses: dataClasses});
++			}else{
++				if(!Utils.arraysEqual(this.state.columns, columns)){
++					console.log("set state column anns " + new Date().toLocaleString());
++					this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns});
++				}else{
++					console.log("columns same, annotations same, dont update");
++				}
++			}
++		}.bind(this));
++
++        var ajaxAborts = [];
++		$.each(requests, function(key, req){
++			ajaxAborts.push(req.abort);
++		}.bind(this));
++		this.setState({ajaxAborts : ajaxAborts});
++	},
++
++	storeColumnAnnotation : function(columnId, annotation){
++		if(!this.columnAnnotations[columnId]){
++			this.columnAnnotations[columnId] = [];
++		}
++		if(!this.atlasAssetArrayContains(this.columnAnnotations[columnId], annotation)){
++			this.columnAnnotations[columnId].push(annotation);
++		}
++	},
++
++	componentWillUnmount : function() {
++		if(this.refreshInterval){
++			clearInterval(this.refreshInterval);
++		}
++   	},
++
++	referenceClick : function(asset){
++		if(this.state == null || this.state.selectedTable == null || this.state.selectedTable.reference.id != asset.reference.id){
++			if(this.refreshInterval){
++				clearInterval(this.refreshInterval);
++			}
++			this.setState({currentlyLoading : true, selectedTable: null, filteredColumns : [], columns: []});
++			this.load(asset.reference);
++			this.refreshInterval = setInterval(function(){this.load(asset.reference)}.bind(this), 15000);
++		}
++	},
++
++	doFilter : function(params){
++		var columns = this.state.columns.slice();
++		var filteredColumns = this.filterOnDataQualityScore(columns, params.qualityScoreFilter);
++		filteredColumns = this.filterOnDataClass(filteredColumns, params.dataClassFilter);
++		this.setState({filteredColumns: filteredColumns});
++	},
++
++	filterOnDataQualityScore : function(columns, equation){
++		if(equation.indexOf("All")>-1){
++			return columns;
++		}
++
++		columns = columns.slice();
++		var matchedColumns = [];
++		$.each(columns, function(index, col){
++			var match = false;
++			$.each(col.loadedAnnotations, function(k, annotation){
++				if(equation && annotation.qualityScore){
++						if(eval("annotation.qualityScore" + equation)){
++							if(matchedColumns.indexOf(col) == -1){
++								matchedColumns.push(col);
++							}
++						}
++				}
++			}.bind(this));
++		}.bind(this));
++
++		return matchedColumns;
++	},
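++
++	// The eval() above works because "equation" is assembled from a fixed operator
++	// list plus a numeric input (see QualityScoreFilter), but an eval-free variant
++	// could use a comparator map instead (sketch, assuming the form <op><number>):
++	//
++	//   var ops = {"<":  function(a, b) { return a <  b; },
++	//              "<=": function(a, b) { return a <= b; },
++	//              "==": function(a, b) { return a == b; },
++	//              ">=": function(a, b) { return a >= b; },
++	//              ">":  function(a, b) { return a >  b; },
++	//              "!=": function(a, b) { return a != b; }};
++	//   var m = equation.match(/^(<=|>=|==|!=|<|>)(.+)$/);
++	//   var matches = m && ops[m[1]](annotation.qualityScore, parseFloat(m[2]));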
++
++	filterOnDataClass : function(columns, key){
++		if(key == "All"){
++			return columns;
++		}
++		var matchedColumns = [];
++		$.each(columns, function(index, col){
++			var match = false;
++			$.each(col.loadedAnnotations, function(k, annotation){
++				if(annotation.inferredDataClass &&
++						annotation.inferredDataClass.className == key){
++					if(matchedColumns.indexOf(col) == -1){
++						matchedColumns.push(col);
++					}
++				}
++			});
++		});
++
++		return matchedColumns;
++	},
++
++	doImport : function(){
++		var params = {
++				jdbcString : this.refs.jdbcInput.getValue(),
++				user: this.refs.userInput.getValue(),
++				password : this.refs.passInput.getValue(),
++				database :this.refs.dbInput.getValue(),
++				schema : this.refs.schemaInput.getValue(),
++				table : this.refs.sourceInput.getValue()
++		};
++
++		this.setState({importingTable : true, tableWasImported : true, });
++
++		$.ajax({
++		      url: ODFGlobals.importUrl,
++		      contentType: "application/json",
++		      dataType: 'json',
++		      type: 'POST',
++		      data: JSON.stringify(params),
++		      success: function(data) {
++		    	  this.setState({importFeedback: {msg: "Registration successful!", style: "primary"}, importingTable: false});
++		      }.bind(this),
++		      error: function(xhr, status, err) {
++				  if(this.isMounted()){
++					var errorMsg = status;
++					if(xhr.responseJSON && xhr.responseJSON.error){
++		    				errorMsg = xhr.responseJSON.error;
++		    		  	}
++				    	var msg = "Table could not be registered: " + errorMsg + ", " + err.toString();
++			    	  	this.setState({importFeedback: {msg: msg, style: "warning"}, importingTable: false});
++				  }
++		      }.bind(this)
++		    });
++	},
++
++	closeImportingDialog : function(){
++		if(this.state.importingTable){
++			return;
++		}
++
++		var newState = {tableWasImported: false, showImportDialog : false, importFeedback: {msg: null}};
++		if(this.state.tableWasImported){
++			this.loadSources();
++			newState.sources = null;
++			newState.filteredSources = null;
++		}
++		this.setState(newState);
++	},
++
++	shopData : function(){
++		var selectedColumns = [];
++		var selectedSources = [];
++
++		$.each(this.state.columns, function(key, col){
++			if(col.isSelected){
++				selectedColumns.push(col);
++			}
++		});
++
++		$.each(this.state.sources, function(key, src){
++			if(src.isSelected){
++				selectedSources.push(src);
++			}
++		});
++
++		console.log("Do something with the selected columns!")
++		console.log(selectedColumns);
++		console.log(selectedSources);
++	},
++
++	filterSources : function(e){
++		var value = $(e.target).val();
++		var filtered = [];
++		if(value.trim() == ""){
++			filtered = this.state.sources;
++		}else{
++			$.each(this.state.sources, function(key, source){
++				if(source.name.toUpperCase().indexOf(value.toUpperCase()) > -1){
++					filtered.push(source);
++				}
++			});
++		}
++		this.setState({filteredSources : filtered});
++	},
++
++	storeImportDialogDefaults: function() {
++		var defaultValues = {
++		   "jdbcInput": this.refs.jdbcInput.getValue(),
++		   "userInput": this.refs.userInput.getValue(),
++		   "passInput": this.refs.passInput.getValue(),
++		   "dbInput": this.refs.dbInput.getValue(),
++		   "schemaInput": this.refs.schemaInput.getValue(),
++		   "sourceInput": this.refs.sourceInput.getValue(),
++		};
++		localStorage.setItem("odf-client-defaults", JSON.stringify(defaultValues) );
++	},
++
++	render : function(){
++		var columnRows = [];
++		var sourceHead = null;
++		var sourceList = null;
++		var columnsGridHeader = <thead><tr><th>Column</th><th>Datatype</th><th>Annotations</th></tr></thead>;
++		var currentlyLoadingImg = null;
++		if(this.state){
++			var sourceListContent = null;
++			if(this.state.sources){
++				var sourceSpec =  {
++
++						attributes: [
++						       {key: "isSelected", label: "",
++								func: function(val, asset){
++									return <SelectCheckbox onChange={function(selected){
++										asset.isSelected = selected;
++									}.bind(this)} asset={asset} />
++
++								}},
++								{key: "icon", label: "", func:
++						    	   function(val, asset){
++							    	   if(asset && asset.type && UISpec[asset.type] && UISpec[asset.type].icon){
++							    		   return UISpec[asset.type].icon;
++							    	   }
++							    	   return UISpec["DefaultDocument"].icon;
++						       		}
++						       },
++							   {key: "name", label: "Name"},
++			                   {key: "type", label: "Type"},
++			                   {key: "annotations", label: "Annotations",
++					        	  func: function(val){
++					        		  if(!val){
++					        			  return 0;
++					        			  }
++					        		  return val.length;
++					        		}
++			                   }
++			            ]};
++
++				sourceListContent = <ODFBrowser.ODFPagingTable rowAssets={this.state.filteredSources} onRowClick={this.referenceClick} spec={sourceSpec}/>;
++			}else{
++				sourceListContent = <Image src="img/lg_proc.gif" rounded />;
++			}
++
++			var sourceImportBtn = <Button style={{float:"right"}} onClick={function(){this.setState({showImportDialog: true});}.bind(this)}>Register new data set</Button>;
++			var sourceImportingImg = null;
++			if(this.state.importingTable){
++				sourceImportingImg = <Image src="img/lg_proc.gif" rounded />;
++			}
++
++			var importFeedback = <h3><Label style={{whiteSpace: "normal"}} bsStyle={this.state.importFeedback.style}>{this.state.importFeedback.msg}</Label></h3>
++
++			var storedDefaults = null;
++			try {
++			   storedDefaults = JSON.parse(localStorage.getItem("odf-client-defaults"));
++			} catch(e) {
++				console.log("Couldnt parse defaults from localStorage: " + e);
++				storedDefaults = {};
++			}
++			if (!storedDefaults) {
++				storedDefaults = {};
++			}
++			console.log("Stored defaults: " + storedDefaults);
++
++			var sourceImportDialog =  <Modal show={this.state.showImportDialog} onHide={this.closeImportingDialog}>
++								          <Modal.Header closeButton>
++								             <Modal.Title>Register new JDBC data set</Modal.Title>
++								          </Modal.Header>
++								          <Modal.Body>
++								          	{importFeedback}
++								            <form>
++								          	 <Input type="text" ref="jdbcInput" defaultValue={storedDefaults.jdbcInput} label="JDBC string" />
++								             <Input type="text" ref="userInput" defaultValue={storedDefaults.userInput} label="Username" />
++								             <Input type="password" ref="passInput" defaultValue={storedDefaults.passInput} label="Password" />
++								             <Input type="text" ref="dbInput" defaultValue={storedDefaults.dbInput} label="Database" />
++								             <Input type="text" ref="schemaInput" defaultValue={storedDefaults.schemaInput} label="Schema" />
++								             <Input type="text" ref="sourceInput" defaultValue={storedDefaults.sourceInput} label="Table" />
++								             </form>
++								             {sourceImportingImg}
++								         </Modal.Body>
++								         <Modal.Footer>
++								         <Button onClick={this.storeImportDialogDefaults}>Store values as defaults</Button>
++								         <Button bsStyle="primary" onClick={this.doImport}>Register</Button>
++								         <Button onClick={this.closeImportingDialog}>Close</Button>
++								         </Modal.Footer>
++									</Modal>;
++			sourceList = <Panel style={{float:"left", marginRight: 30, maxWidth:600, minHeight: 550}}>
++								{sourceImportDialog}
++								<h3 style={{float: "left", marginTop: "5px"}}>
++									Data sets
++								</h3>
++								{sourceImportBtn}<br style={{clear: "both"}}/>
++								<Input onChange={this.filterSources} addonBefore={<Glyphicon glyph="search" />} label=" " type="text" placeholder="Filter ..." />
++								<br/>
++								{sourceListContent}
++							</Panel>;
++			if(this.state.currentlyLoading){
++				currentlyLoadingImg = <Image src="img/lg_proc.gif" rounded />;
++			}
++			var panel = <div style={{float: "left"}}>{currentlyLoadingImg}</div>;
++
++			if(this.state.selectedTable){
++				var source = this.state.selectedTable;
++				var sourceAnnotations = [];
++				if(source.loadedAnnotations){
++					//reverse so newest is at front
++					var sourceAnns = source.loadedAnnotations.slice();
++					sourceAnns.reverse();
++					var processedTypes = [];
++					$.each(sourceAnns, function(key, val){
++						if(processedTypes.indexOf(val.annotationType) == -1){
++							processedTypes.push(val.annotationType);
++							var summary = (val.summary ? ", " + val.summary : "");
++							sourceAnnotations.push(<ODFAnnotationMarker key={key} annotation={val}/>);
++						}
++					});
++				}
++
++				var hasColumns = (source.columns && source.columns.length > 0 ? true : false);
++				var columnsString = (hasColumns ? "Columns: " + source.columns.length : null);
++				var annotationsFilter = (hasColumns ? <FilterMenu onFilter={this.doFilter} dataClasses={this.state.dataClasses} style={{float: "right"}} /> : null);
++
++				sourceHead = <div>
++								<h3>{source.name} </h3>
++									<div style={{}}>
++										<NewAnalysisRequestButton dataSetId={this.state.selectedTable.reference.id} />
++									</div>
++								<br/>
++								Description: {source.description}
++								<br/>
++								{columnsString}
++								<br/>Annotations:{sourceAnnotations}
++								<br/>
++								{annotationsFilter}
++								</div>;
++
++				panel = <Panel style={{float: "left", width: "50%"}} header={sourceHead}>
++							{currentlyLoadingImg}
++						</Panel>;
++			}
++			var columnsTable = null;
++			var filteredColumns = (this.state.filteredColumns ? this.state.filteredColumns : []).slice();
++
++			if(filteredColumns.length > 0){
++				var colSpec = {attributes: [{key: "isSelected", label: "Select",
++					func: function(val, col){
++						return <SelectCheckbox onChange={function(selected){
++							col.isSelected = selected;
++						}.bind(this)} asset={col} />
++
++					}},
++	               {key: "name", label: "Name", sort: true},
++		           {key: "dataType", label: "Datatype"},
++		           {key: "loadedAnnotations", label: "Annotations",
++			        	  func: function(annotations, obj){
++			        		  return <AnnotationsColumn annotations={annotations} />;
++			        	  }
++			          }]};
++				columnsTable = <div><ODFBrowser.ODFPagingTable ref="columnsTable" rowAssets={filteredColumns} assetType={"columns"} spec={colSpec}/><br/><ODFAnnotationLegend /></div>;
++				panel = (<Panel style={{float:"left", width: "50%"}} header={sourceHead}>
++							{columnsTable}
++						</Panel>);
++			}
++		}
++
++		var contentComponent = <Jumbotron>
++	      <div>
++	         <h2>Welcome to your Data Lake</h2>
++	         	<Button bsStyle="success" onClick={this.shopData}>
++	         		Shop selected data  <Glyphicon glyph="shopping-cart" />
++         		</Button>
++	         	<br/>
++	         	<br/>
++		         {sourceList}
++		         {panel}
++		        <div style={{clear: "both"}} />
++         </div>
++       </Jumbotron>;
++
++		return <div>{contentComponent}</div>;
++	}
++});
++
++var ODFTermPage = React.createClass({
++
++  getInitialState() {
++    return {terms: []};
++  },
++
++  loadTerms : function() {
++    // clear alert
++    this.props.alertCallback({type: "", message: ""});
++    var req = AtlasHelper.searchAtlasMetadata("from BusinessTerm",
++
++        function(data){
++		   	if(!this.isMounted()){
++				return;
++			}
++			this.setState({terms: data});
++        }.bind(this),
++
++        function() {
++        }.bind(this)
++    );
++  },
++
++  componentDidMount() {
++    this.loadTerms();
++  },
++
++  render: function() {
++     var terms = $.map(
++        this.state.terms,
++        function(term) {
++          return <tr style={{cursor: 'pointer'}} key={term.name} title={term.example} onClick={function(){
++        	  var win = window.open(term.originRef, '_blank');
++        	  win.focus();}
++          }>
++                  <td>
++                     {term.name}
++                  </td>
++                  <td>
++                	{term.description}
++                  </td>
++                 </tr>
++        }.bind(this)
++       );
++
++     return (
++       <div className="jumbotron">
++       <h2>Glossary</h2>
++       <br/>
++       <br/>
++       <Panel>
++       	  <h3>Terms</h3>
++          <Table>
++          	 <thead>
++          	 	<tr>
++          	 	<th>Name</th>
++          	 	<th>Description</th>
++          	 	</tr>
++          	 </thead>
++             <tbody>
++                {terms}
++             </tbody>
++          </Table>
++          </Panel>
++       </div>
++     )
++   }
++});
++
++var ODFClient = React.createClass({
++
++   componentDidMount: function() {
++     $(window).bind("hashchange", this.parseUrl);
++     this.parseUrl();
++   },
++
++   parseUrl : function(){
++    var target = constants_ODFNavBar.odfDataLakePage;
++    var navAddition = null;
++    var hash = document.location.hash;
++    if(hash && hash.length > 1){
++      hash = hash.split("#")[1];
++      var split = hash.split("/");
++      var navHash = split[0];
++      if(split.length > 0){
++        navAddition = split.slice(1);
++      }
++      if(constants_ODFNavBar[navHash]){
++        target = constants_ODFNavBar[navHash];
++      }
++    }
++    this.setState({
++      activeNavBarItem: target,
++        navAddition: navAddition}
++    );
++  },
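++
++  // The hash format handled above is "#<navKey>[/extra/segments]", e.g.
++  // "#odfTermPage" or "#odfDataLakePage/some/addition": the first segment must
++  // be a key of constants_ODFNavBar, everything after it ends up in navAddition.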
++
++  getInitialState: function() {
++    return ({
++        activeNavBarItem: constants_ODFNavBar.odfDataLakePage,
++        navAddition: null,
++        globalAlert: {
++          type: "",
++          message: ""
++        }
++    });
++  },
++
++  handleNavBarSelection: function(selection) {
++    $.each(constants_ODFNavBar, function(key, ref){
++      if(ref == selection){
++        document.location.hash = key;
++      }
++    });
++    this.setState({ activeNavBarItem: selection });
++  },
++
++  handleAlert: function(alertInfo) {
++    this.setState({ globalAlert: alertInfo });
++  },
++
++  render: function() {
++    var alertComp = null;
++    if (this.state.globalAlert.type != "") {
++       alertComp = <Alert bsStyle={this.state.globalAlert.type}>{this.state.globalAlert.message}</Alert>;
++    }
++
++    var contentComponent = <ODFDataLakePage alertCallback={this.handleAlert}/>;
++    if (this.state.activeNavBarItem == constants_ODFNavBar.odfDataLakePage) {
++       contentComponent = <ODFDataLakePage alertCallback={this.handleAlert}/>;
++    } else if (this.state.activeNavBarItem == constants_ODFNavBar.odfTermPage) {
++       contentComponent = <ODFTermPage alertCallback={this.handleAlert}/>;
++    }
++
++    var divStyle = {};
++
++    return (
++        <div>
++           <ODFNavBar activeKey={this.state.activeNavBarItem} selectCallback={this.handleNavBarSelection}></ODFNavBar>
++           <div style={divStyle}>
++              {alertComp}
++              {contentComponent}
++           </div>
++        </div>
++    );
++  }
++});
++
++var div = $("#odf-toplevel-div")[0];
++ReactDOM.render(<ODFClient/>, div);
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js b/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js
+new file mode 100755
+index 0000000..cf50075
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js
+@@ -0,0 +1,63 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var ODFGlobals = require("./odf-globals.js");
++
++var ConfigurationStore = {
++
++   readConfig(successCallback, alertCallback) {
++     // clear any previous alert
++     if (alertCallback) {
++       alertCallback({type: ""});
++     }
++
++     return $.ajax({
++       url: ODFGlobals.apiPrefix + "settings",
++       dataType: 'json',
++       type: 'GET',
++       success: successCallback,
++       error: function(xhr, status, err) {
++         if (alertCallback) {
++            var msg = "Error while reading settings: " + err.toString();
++            alertCallback({type: "danger", message: msg});
++         }
++       }
++      }); // return the jqXHR so callers can abort the request via req.abort
++   },
++
++   updateConfig(config, successCallback, alertCallback) {
++		if (alertCallback) {
++			 alertCallback({type: ""});
++		}
++
++	    return $.ajax({
++		       url: ODFGlobals.apiPrefix + "settings",
++		       contentType: "application/json",
++		       dataType: 'json',
++		       type: 'PUT',
++		       data: JSON.stringify(config),
++		       success: successCallback,
++		       error: function(xhr, status, err) {
++		         if (alertCallback) {
++		            var msg = "Error while updating settings: " + err.toString();
++		            alertCallback({type: "danger", message: msg});
++		         }
++		       }
++	     }); // return the jqXHR so callers can abort the request via req.abort
++   }
++}
++
++module.exports = ConfigurationStore;
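++
++// Usage sketch (assumes a React component that uses AJAXCleanupMixin and has an
++// alertCallback prop):
++//
++//   var req = ConfigurationStore.readConfig(
++//       function(config) { this.setState({config: config}); }.bind(this),
++//       this.props.alertCallback);
++//   this.storeAbort(req.abort); // possible because readConfig returns the jqXHR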
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-console.js b/odf/odf-web/src/main/webapp/scripts/odf-console.js
+new file mode 100755
+index 0000000..aa70808
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-console.js
+@@ -0,0 +1,967 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++//css imports
++require("bootstrap/dist/css/bootstrap.min.css");
++require("bootstrap-material-design/dist/css/bootstrap-material-design.min.css");
++require("bootstrap-material-design/dist/css/ripples.min.css");
++require("roboto-font/css/fonts.css");
++
++
++//js imports
++var $ = require("jquery");
++var bootstrap = require("bootstrap");
++
++var React = require("react");
++var ReactDOM = require("react-dom");
++var LinkedStateMixin = require("react-addons-linked-state-mixin");
++var ReactBootstrap = require("react-bootstrap");
++
++var ODFGlobals = require("./odf-globals.js");
++var ODFStats = require("./odf-statistics.js");
++var ODFSettings = require("./odf-settings.js");
++var ODFServices = require("./odf-services.js");
++var ODFBrowser = require("./odf-metadata-browser.js").ODFMetadataBrowser;
++var ODFRequestBrowser = require("./odf-request-browser.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var configurationStore = require("./odf-utils.js").ConfigurationStore;
++var servicesStore = require("./odf-utils.js").ServicesStore;
++var AtlasHelper = require("./odf-utils.js").AtlasHelper;
++var AnnotationStoreHelper = require("./odf-utils.js").AnnotationStoreHelper;
++var OdfAnalysisRequest = require("./odf-analysis-request.js");
++var LogViewer = require("./odf-logs.js");
++//var Notifications = require("./odf-notifications.js");
++var NewAnalysisRequestButton = OdfAnalysisRequest.NewAnalysisRequestButton;
++var NewAnalysisRequestDialog = OdfAnalysisRequest.NewAnalysisRequestDialog;
++var NewCreateAnnotationsButton = OdfAnalysisRequest.NewCreateAnnotationsButton;
++var NewCreateAnnotationsDialog = OdfAnalysisRequest.NewCreateAnnotationsDialog;
++
++var Button = ReactBootstrap.Button;
++var Nav = ReactBootstrap.Nav;
++var NavItem = ReactBootstrap.NavItem;
++var Navbar = ReactBootstrap.Navbar;
++var NavDropdown = ReactBootstrap.NavDropdown;
++var MenuItem = ReactBootstrap.MenuItem;
++var Jumbotron = ReactBootstrap.Jumbotron;
++var Grid = ReactBootstrap.Grid;
++var Row = ReactBootstrap.Row;
++var Col = ReactBootstrap.Col;
++var Table = ReactBootstrap.Table;
++var Modal = ReactBootstrap.Modal;
++var Input = ReactBootstrap.Input;
++var Alert = ReactBootstrap.Alert;
++var Panel = ReactBootstrap.Panel;
++var Label = ReactBootstrap.Label;
++var ProgressBar = ReactBootstrap.ProgressBar;
++var Image = ReactBootstrap.Image;
++var ListGroup = ReactBootstrap.ListGroup;
++var ListGroupItem = ReactBootstrap.ListGroupItem;
++var Tabs = ReactBootstrap.Tabs;
++var Tab = ReactBootstrap.Tab;
++var Glyphicon = ReactBootstrap.Glyphicon;
++
++var PerServiceStatusGraph = ODFStats.PerServiceStatusGraph;
++var TotalAnalysisGraph = ODFStats.TotalAnalysisGraph;
++var SystemDiagnostics = ODFStats.SystemDiagnostics;
++
++////////////////////////////////////////////////////////////////
++// toplevel navigation bar
++
++const constants_ODFNavBar = {
++  gettingStarted: "navKeyGettingStarted",
++  configuration: "navKeyConfiguration",
++  monitor: "navKeyMonitor",
++  discoveryServices: "navKeyDiscoveryServices",
++  data: "navKeyData",
++  analysis: "navKeyAnalysis"
++}
++
++var ODFNavBar = React.createClass({
++   render: function() {
++       return (
++         <Navbar inverse>
++           <Navbar.Header>
++             <Navbar.Brand>
++               <b>Open Discovery Framework</b>
++             </Navbar.Brand>
++             <Navbar.Toggle />
++           </Navbar.Header>
++           <Navbar.Collapse>
++             <Nav pullRight activeKey={this.props.activeKey} onSelect={this.props.selectCallback}>
++               <NavItem eventKey={constants_ODFNavBar.gettingStarted} href="#">Getting Started</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.monitor} href="#">System Monitor</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.configuration} href="#">Settings</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.discoveryServices} href="#">Services</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.data} href="#">Data sets</NavItem>
++               <NavItem eventKey={constants_ODFNavBar.analysis} href="#">Analysis</NavItem>
++             </Nav>
++           </Navbar.Collapse>
++         </Navbar>
++       );
++   }
++});
++
++
++
++/////////////////////////////////////////////////////////////////////////////////////////
++// Configuration page
++
++var ConfigurationPage = React.createClass({
++  componentWillMount() {
++      this.props.alertCallback({type: ""});
++  },
++
++  render: function() {
++    return (
++    <div className="jumbotron">
++      <Tabs position="left" defaultActiveKey={1}>
++        <Tab eventKey={1} title="General">
++          <ODFSettings.ODFConfigPage alertCallback={this.props.alertCallback}/>
++        </Tab>
++        <Tab eventKey={2} title="Spark settings">
++          <ODFSettings.SparkConfigPage alertCallback={this.props.alertCallback}/>
++        </Tab>
++        <Tab eventKey={3} title="User-defined">
++          <ODFSettings.UserDefinedConfigPage alertCallback={this.props.alertCallback}/>
++        </Tab>
++      </Tabs>
++      </div>
++      );
++  }
++
++});
++
++const GettingStartedPage = React.createClass({
++  getInitialState() {
++     return ({version: "NOTFOUND"});
++  },
++
++  componentWillMount() {
++     this.props.alertCallback({type: ""});
++     $.ajax({
++         url: ODFGlobals.engineUrl + "/version",
++         type: 'GET',
++         success: function(data) {
++             this.setState(data);
++         }.bind(this),
++         error: function() {
++             // keep the "NOTFOUND" placeholder if the version cannot be retrieved
++         }
++       });
++  },
++
++  render: function() {
++    var divStyle = {
++      marginLeft: "80px",
++      marginRight: "80px"
++    };
++    return (
++      <Jumbotron>
++      <div style={divStyle}>
++         <h2>Welcome to the Open Discovery Framework Console</h2>
++         <p/>The "Open Discovery Framework" (ODF) is an open metadata-based platform
++         that strives to be a common home for different analytics technologies
++         that discover characteristics of data sets and relationships between
++         them (think "AppStore for discovery algorithms").
++         Using ODF, applications can leverage new discovery algorithms and their
++         results with minimal integration effort.
++         <p/>
++         This console lets you administer and configure your ODF system, as well as
++         run analyses and browse their results.
++         <p/>
++         <p><Button target="_blank" href="doc" bsStyle="primary">Open Documentation</Button></p>
++         <p><Button target="_blank" href="swagger" bsStyle="success">Show API Reference</Button></p>
++         <p/>
++		 Version: {this.state.version}
++         </div>
++       </Jumbotron>
++
++      )
++  }
++
++});
++
++/////////////////////////////////////////////////////////////////////
++// monitor page
++var StatusGraphs = React.createClass({
++
++	selectTab : function(key){
++		this.setState({key});
++	},
++
++	getInitialState() {
++	    return {
++	      key: "system_state"
++	    };
++	 },
++
++	render : function() {
++		var divStyle = {
++		     marginLeft: "20px"
++	    };
++
++		return (
++			<div>
++				<Tabs position="left" activeKey={this.state.key} onSelect={this.selectTab}>
++					<Tab eventKey={"system_state"} title="System state">
++						<div style={divStyle}>
++							<TotalAnalysisGraph visible={this.state.key == "system_state"} alertCallback={this.props.alertCallback}/>
++							<PerServiceStatusGraph visible={this.state.key == "system_state"} alertCallback={this.props.alertCallback}/>
++						</div>
++					</Tab>
++				    <Tab eventKey={"diagnostics"} title="Diagnostics">
++						<div style={divStyle}>
++							<SystemDiagnostics visible={this.state.key == "diagnostics"} alertCallback={this.props.alertCallback}/>
++						</div>
++					</Tab>
++					<Tab eventKey={"logs"} title="System logs">
++						<div style={divStyle}>
++							<LogViewer visible={this.state.key == "logs"} alertCallback={this.props.alertCallback}/>
++						</div>
++					</Tab>
++				</Tabs>
++			</div>
++         );
++	}
++
++
++});
++
++var MonitorPage = React.createClass({
++	mixins : [AJAXCleanupMixin],
++
++	getInitialState() {
++		return ( {
++				monitorStatusVisible: false,
++				monitorStatusStyle:"success",
++				monitorStatusMessage: "OK",
++				monitorWorkInProgress: false
++		});
++	},
++
++	componentWillMount() {
++	   this.props.alertCallback({type: ""});
++	},
++
++	checkHealth() {
++		this.setState({monitorWorkInProgress: true, monitorStatusVisible: false});
++	    var url = ODFGlobals.engineUrl + "/health";
++		var req = $.ajax({
++	         url: url,
++	         dataType: 'json',
++	         type: 'GET',
++	         success: function(data) {
++	        	 var status = data.status;
++	        	 var newState = {
++	        		monitorStatusVisible: true,
++	        		monitorWorkInProgress: false
++	        	 };
++
++	        	 if (status == "OK") {
++	        		 newState.monitorStatusStyle = "success";
++	        	 } else if (status == "WARNING") {
++	        		 newState.monitorStatusStyle = "warning";
++	        	 } else if (status == "ERROR") {
++	        		 newState.monitorStatusStyle = "danger";
++	        	 }
++	        	 // TODO show more than just the first message
++        		 newState.monitorStatusMessage = "Status: " + status + ". " + data.messages[0];
++
++	        	 this.setState(newState);
++	         }.bind(this),
++	         error: function(xhr, status, err) {
++	      	   if(this.isMounted()){
++	      		   this.setState({
++	        	   monitorStatusVisible: true,
++	        	   monitorStatusStyle:"danger",
++	        	   monitorStatusMessage: "An error occurred: " + err.toString(),
++	        	   monitorWorkInProgress: false});
++	      	   }
++	         }.bind(this)
++	        });
++		this.storeAbort(req.abort);
++	},
++
++	performRestart : function(){
++		$.ajax({
++		      url: ODFGlobals.engineUrl + "/shutdown",
++		      contentType: "application/json",
++		      type: 'POST',
++		      data: JSON.stringify({restart: "true"}),
++		      success: function(data) {
++		  			this.setState({monitorStatusVisible : true, monitorStatusStyle: "info", monitorStatusMessage: "Restart in progress..."});
++		      }.bind(this),
++		      error: function(xhr, status, err) {
++		  			this.setState({monitorStatusVisible : true, monitorStatusStyle: "warning", monitorStatusMessage: "Restart request failed"});
++		      }.bind(this)
++		    });
++	},
++
++	render() {
++	  var divStyle = {
++		      marginLeft: "20px"
++		    };
++	  var monitorStatus = null;
++	  if (this.state.monitorStatusVisible) {
++		  monitorStatus = <Alert bsStyle={this.state.monitorStatusStyle}>{this.state.monitorStatusMessage}</Alert>;
++	  }
++	  var progressIndicator = null;
++	  if (this.state.monitorWorkInProgress) {
++		  progressIndicator = <Image src="img/lg_proc.gif" rounded />;
++	  }
++	  return (
++	    	<div className="jumbotron">
++	    	<h3>System health</h3>
++	    	  <div style={divStyle}>
++	           	<Button className="btn-raised" bsStyle="primary" disabled={this.state.monitorWorkInProgress} onClick={this.checkHealth}>Check health</Button>
++	           	<Button className="btn-raised" bsStyle="warning" onClick={this.performRestart}>Restart ODF</Button>
++	           	{progressIndicator}
++	           	{monitorStatus}
++	           	<hr/>
++	           	<div>
++	           	</div>
++	           	<StatusGraphs alertCallback={this.props.alertCallback}/>
++	    	  </div>
++	    	</div>
++	  );
++	}
++
++});
++
++//////////////////////////////////////////////////////
++// discovery services page
++var DiscoveryServicesPage = React.createClass({
++  mixins : [AJAXCleanupMixin],
++
++  getInitialState() {
++	  return ({discoveryServices: []});
++  },
++
++  loadDiscoveryServices() {
++	  // clear alert
++    this.props.alertCallback({type: "", message: ""});
++
++	var req = $.ajax({
++	    url: ODFGlobals.servicesUrl,
++	    dataType: 'json',
++	    type: 'GET',
++	    success: function(data) {
++	       this.setState({discoveryServices: data});
++	    }.bind(this),
++	    error: function(xhr, status, err) {
++    	   if(this.isMounted()){
++    		   var msg = "Error while reading ODF services: " + err.toString();
++    		   this.props.alertCallback({type: "danger", message: msg});
++    	   }
++	    }.bind(this)
++	  });
++
++	this.storeAbort(req.abort);
++  },
++
++  componentDidMount() {
++	  this.loadDiscoveryServices();
++  },
++
++  render: function() {
++	var services = $.map(
++        this.state.discoveryServices,
++        function(dsreg) {
++          return <tr key={dsreg.id}>
++                  <td>
++                     <ODFServices.DiscoveryServiceInfo dsreg={dsreg} refreshCallback={this.loadDiscoveryServices} alertCallback={this.props.alertCallback}/>
++                  </td>
++                 </tr>
++        }.bind(this)
++    );
++
++	return (
++	     <div className="jumbotron">
++           <h3>Services</h3>
++           This page lets you manage the services for this ODF instance.
++           You can add services manually by clicking the <em>Add Service</em> button or
++           register remote services (e.g. deployed on Bluemix) you have built with the ODF service developer kit by
++           clicking the <em>Register remote services</em> link.
++           <p/>
++					 <ODFServices.AddDiscoveryServiceButton refreshCallback={this.loadDiscoveryServices}/>
++           <p/>
++	       	<Table bordered responsive>
++	         <tbody>
++	         {services}
++             </tbody>
++          </Table>
++	     </div>
++	);
++  }
++
++});
++
++//////////////////////////////////////////////////////////////
++// Analysis Page
++var AnalysisRequestsPage = React.createClass({
++  mixins : [AJAXCleanupMixin],
++
++  getInitialState() {
++      return {recentAnalysisRequests: null, config: {}, services : []};
++  },
++
++  componentWillReceiveProps : function(nextProps){
++  	var selection = null;
++	if(nextProps.navAddition && nextProps.navAddition.length > 0 && nextProps.navAddition[0] && nextProps.navAddition[0].length > 0){
++		var jsonAddition = {};
++
++		try{
++			jsonAddition = JSON.parse(decodeURIComponent(nextProps.navAddition[0]));
++		}catch(e){
++			// ignore additions that are not valid JSON
++		}
++
++		if(jsonAddition.requestId){
++			$.each(this.state.recentAnalysisRequests, function(key, tracker){
++				var reqId = jsonAddition.requestId;
++
++				if(tracker.request.id == reqId){
++					selection = reqId;
++				}
++			}.bind(this));
++		}else if(jsonAddition.id && jsonAddition.repositoryId){
++			selection = jsonAddition;
++		}
++	}
++
++	if(selection != this.state.selection){
++		this.setState({selection : selection});
++	}
++  },
++
++  componentDidMount() {
++	  if(!this.refreshInterval){
++		  this.refreshInterval = window.setInterval(this.refreshAnalysisRequests, 5000);
++	  }
++      this.initialLoadServices();
++      this.initialLoadRecentAnalysisRequests();
++  },
++
++  componentWillUnmount : function() {
++	  if(this.refreshInterval){
++		  window.clearInterval(this.refreshInterval);
++	  }
++  },
++
++  getDiscoveryServiceNameFromId(id) {
++      var servicesWithSameId = this.state.services.filter(
++         function(dsreg) {
++             return dsreg.id == id;
++         }
++      );
++      if (servicesWithSameId.length > 0) {
++        return servicesWithSameId[0].name;
++      }
++      return null;
++  },
++
++  refreshAnalysisRequests : function(){
++	  var req = configurationStore.readConfig(
++		      function(config) {
++		          this.setState({config: config});
++		          const url = ODFGlobals.analysisUrl + "?offset=0&limit=20";
++		          $.ajax({
++		            url: url,
++		            dataType: 'json',
++		            type: 'GET',
++		            success: function(data) {
++		            	$.each(data.analysisRequestTrackers, function(key, tracker){
++		                	//collect service names by id and add to json so that it can be displayed later
++		            		$.each(tracker.discoveryServiceRequests, function(key, request){
++			            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
++			            		request.discoveryServiceName = serviceName;
++		            		}.bind(this));
++		            	}.bind(this));
++		                this.setState({recentAnalysisRequests: data.analysisRequestTrackers});
++		            }.bind(this),
++		            error: function(xhr, status, err) {
++		            	if(status != "abort" ){
++		            		console.error(url, status, err.toString());
++		            	}
++		            	if(this.isMounted()){
++		            	  var msg = "Error while refreshing recent analysis requests: " + err.toString();
++		            	  this.props.alertCallback({type: "danger", message: msg});
++		            	}
++		            }.bind(this)
++		          });
++		      }.bind(this),
++	      this.props.alertCallback
++	    );
++
++	    this.storeAbort(req.abort);
++  },
++
++  initialLoadServices() {
++	this.setState({services: null});
++
++    var req = servicesStore.getServices(
++      function(services) {
++          this.setState({services: services});
++      }.bind(this),
++      this.props.alertCallback
++    );
++
++    this.storeAbort(req.abort);
++  },
++
++  initialLoadRecentAnalysisRequests() {
++	this.setState({recentAnalysisRequests: null});
++
++    var req = configurationStore.readConfig(
++      function(config) {
++          this.setState({config: config});
++          const url = ODFGlobals.analysisUrl + "?offset=0&limit=20";
++          $.ajax({
++            url: url,
++            dataType: 'json',
++            type: 'GET',
++            success: function(data) {
++            	var selection = null;
++            	$.each(data.analysisRequestTrackers, function(key, tracker){
++            		if(this.props.navAddition && this.props.navAddition.length > 0 && this.props.navAddition[0].length > 0){
++            			var reqId = "";
++            			try{
++            				reqId = JSON.parse(decodeURIComponent(this.props.navAddition[0])).requestId;
++            			}catch(e){
++            				// ignore additions that are not valid JSON
++            			}
++            			if(tracker.request.id == reqId){
++            				selection = reqId;
++            			}
++        			}
++
++                	//collect service names by id and add to json so that it can be displayed later
++            		$.each(tracker.discoveryServiceRequests, function(key, request){
++	            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
++	            		request.discoveryServiceName = serviceName;
++            		}.bind(this));
++            	}.bind(this));
++
++            	var newState = {recentAnalysisRequests: data.analysisRequestTrackers};
++            	if(selection){
++            		newState.selection = selection;
++            	}
++
++               this.setState(newState);
++            }.bind(this),
++            error: function(xhr, status, err) {
++            	if(status != "abort" ){
++            		console.error(url, status, err.toString());
++            	}
++            	if(this.isMounted()){
++            	  var msg = "Error while loading recent analysis requests: " + err.toString();
++            	  this.props.alertCallback({type: "danger", message: msg});
++            	}
++            }.bind(this)
++          });
++      }.bind(this),
++      this.props.alertCallback
++    );
++
++    this.storeAbort(req.abort);
++  },
++
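++  // POSTs to <analysisUrl>/<request id>/cancel; the server only cancels requests
++  // that have not been started yet (see the error handling below)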
++  cancelAnalysisRequest(tracker) {
++      var url = ODFGlobals.analysisUrl + "/" + tracker.request.id + "/cancel";
++
++      $.ajax({
++          url: url,
++          type: 'POST',
++          success: function() {
++			  if(this.isMounted()){
++				  this.refreshAnalysisRequests();
++			  }
++          }.bind(this),
++          error: function(xhr, status, err) {
++        	  if(status != "abort" ){
++          		console.error(url, status, err.toString());
++        	  }
++
++        	  var errMsg = null;
++        	  if(err == "Forbidden"){
++        		  errMsg = "only analyses that have not been started yet can be cancelled!";
++        	  }else if(err == "Bad Request"){
++        		  errMsg = "the requested analysis could not be found!";
++        	  }
++        	  if(this.isMounted()){
++				  var msg = "Analysis could not be cancelled: " + (errMsg ? errMsg : err.toString());
++				  if(this.props.alertCallback){
++					  this.props.alertCallback({type: "danger", message: msg});
++				  }
++        	  }
++          }.bind(this)
++      });
++  },
++
++  viewResultAnnotations : function(target){
++	  this.setState({
++			resultAnnotations : null,
++			resultTarget : target.request.id,
++			showAnnotations: true
++		});
++	  var req = AnnotationStoreHelper.loadAnnotationsForRequest(target.request.id,
++			function(data){
++				this.setState({
++					resultAnnotations : data.annotations
++				});
++			}.bind(this),
++			function(error){
++				console.error('Annotations could not be loaded ' + error);
++			}
++		);
++	  this.storeAbort(req.abort);
++  },
++
++  viewInAtlas : function(target){
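++	  // repositoryId has the form "atlas:<repository url>"; strip the prefix and
++	  // open an Atlas DSL search for all annotations of this analysis run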
++	  var repo =  target.request.dataSets[0].repositoryId;
++	  repo = repo.split("atlas:")[1];
++      var annotationQueryUrl = repo + "/#!/search?query=from%20ODFAnnotation%20where%20analysisRun%3D'"+ target.request.id + "'";
++	  var win = window.open(annotationQueryUrl, '_blank');
++  },
++
++  render : function() {
++    var loadingImg = null;
++    if(this.state.recentAnalysisRequests == null){
++    	loadingImg = <Image src="img/lg_proc.gif" rounded />;
++    }
++    var requestActions = [
++                           {
++                        	   assetType: ["requests"],
++                        	   actions : [
++                	              {
++                	            	  label: "Cancel analysis",
++                	            	  func: this.cancelAnalysisRequest,
++                	            	  filter: function(obj){
++                	            		  var val = obj.status;
++                	            		  if (val == "INITIALIZED" || val == "IN_DISCOVERY_SERVICE_QUEUE") {
++                	            			  return true;
++                	            		  }
++                	            		  return false;
++                	            	  }
++                	              },
++                	              {
++                	            	  label: "View results",
++                	            	  func: this.viewResultAnnotations
++                	              },
++                	              {
++                	            	  label: "View results in atlas",
++                	            	  func: this.viewInAtlas
++                	              }
++                	           ]
++                           	}
++                           ];
++    return (
++    		<div className="jumbotron">
++			   <h3>Analysis requests</h3>
++			   <div>
++		        Click Refresh to refresh the list of existing analysis requests.
++		        Only the last 20 valid requests are shown.
++		         <p/>
++		        <NewAnalysisRequestButton bsStyle="primary" onClose={this.refreshAnalysisRequests} alertCallback={this.props.alertCallback}/>
++		        <NewCreateAnnotationsButton bsStyle="primary" onClose={this.refreshAnalysisRequests} alertCallback={this.props.alertCallback}/>
++
++		        <Button bsStyle="success" onClick={this.refreshAnalysisRequests}>Refresh</Button> &nbsp;
++            	{loadingImg}
++		        <ODFRequestBrowser registeredServices={this.state.config.registeredServices} actions={requestActions} ref="requestBrowser" selection={this.state.selection} assets={this.state.recentAnalysisRequests}/>
++		       </div>
++            	<Modal show={this.state.showAnnotations} onHide={function(){this.setState({showAnnotations : false})}.bind(this)}>
++	            	<Modal.Header closeButton>
++	                	<Modal.Title>Analysis results for analysis {this.state.resultTarget}</Modal.Title>
++		             </Modal.Header>
++		             <Modal.Body>
++		             	<ODFBrowser ref="resultBrowser" type={"annotations"} assets={this.state.resultAnnotations} />
++		             </Modal.Body>
++		             <Modal.Footer>
++		            <Button onClick={function(){this.setState({showAnnotations : false})}.bind(this)}>Close</Button>
++		            </Modal.Footer>
++            	</Modal>
++		    </div>
++    );
++  }
++
++});
++
++var AnalysisDataSetsPage = React.createClass({
++  mixins : [AJAXCleanupMixin],
++
++  componentDidMount() {
++      this.loadDataFiles();
++      this.loadTables();
++      this.loadDocuments();
++  },
++
++  getInitialState() {
++      return ({	showDataFiles: true,
++    	  		showHideDataFilesIcon: "chevron-up",
++    	  		showTables: true,
++    	  		showHideTablesIcon: "chevron-up",
++    	  		showDocuments: true,
++    	  		showHideDocumentsIcon: "chevron-up",
++    	  		config: null});
++  },
++
++  componentWillReceiveProps : function(nextProps){
++	if(nextProps.navAddition && nextProps.navAddition.length > 0 && nextProps.navAddition[0]){
++		this.setState({selection : nextProps.navAddition[0]});
++	}else{
++		this.setState({selection : null});
++	}
++  },
++
++  showHideDataFiles() {
++	  this.setState({showDataFiles: !this.state.showDataFiles, showHideDataFilesIcon: (!this.state.showDataFiles? "chevron-up" : "chevron-down")});
++  },
++
++  showHideTables() {
++	  this.setState({showTables: !this.state.showTables, showHideTablesIcon: (!this.state.showTables? "chevron-up" : "chevron-down")});
++  },
++
++  showHideDocuments() {
++	  this.setState({showDocuments: !this.state.showDocuments, showHideDocumentsIcon: (!this.state.showDocuments ? "chevron-up" : "chevron-down")});
++  },
++
++  createAnnotations : function(target){
++		this.setState({showCreateAnnotationsDialog: true, selectedAsset : target.reference.id});
++  },
++
++  startAnalysis : function(target){
++		this.setState({showAnalysisRequestDialog: true, selectedAsset : target.reference.id});
++  },
++
++  viewInAtlas : function(target){
++	  var win = window.open(target.reference.url, '_blank');
++	  win.focus();
++  },
++
++  loadDataFiles : function(){
++	  var  resultQuery = "from DataFile";
++	  this.setState({
++			dataFileAssets : null
++	  });
++	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
++			function(data){
++				this.setState({
++					dataFileAssets : data
++				});
++			}.bind(this),
++			function(error){
++				// ignore load errors; the panel simply stays empty
++			}
++		);
++	  this.storeAbort(req.abort);
++  },
++
++  loadTables : function(){
++	  var  resultQuery = "from Table";
++	  this.setState({
++			tableAssets : null
++	  });
++	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
++			function(data){
++				this.setState({
++					tableAssets : data
++				});
++			}.bind(this),
++			function(error){
++				// ignore load errors; the panel simply stays empty
++			}
++		);
++	  this.storeAbort(req.abort);
++  },
++
++  loadDocuments : function(){
++	  var  resultQuery = "from Document";
++	  this.setState({
++			docAssets : null
++	  });
++	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
++			function(data){
++				this.setState({
++					docAssets : data
++				});
++			}.bind(this),
++			function(error){
++				// ignore load errors; the panel simply stays empty
++			}
++		);
++	  this.storeAbort(req.abort);
++  },
++
++  render() {
++    var actions = [
++             {
++        	   assetType: ["DataFiles", "Tables", "Documents"],
++        	   actions : [
++	              {
++	            	  label: "Start analysis (annotation types)",
++	            	  func: this.createAnnotations
++	              } ,
++	              {
++	            	  label: "Start analysis (service sequence)",
++	            	  func: this.startAnalysis
++	              } ,
++	              {
++	            	  label: "View in atlas",
++	            	  func: this.viewInAtlas
++	              }
++        	    ]
++	         }
++	     ];
++
++    return (
++    		<div className="jumbotron">
++    		   <h3>Data sets</h3>
++		       <div>
++		       	 <NewAnalysisRequestDialog alertCallback={this.props.alertCallback} dataSetId={this.state.selectedAsset} show={this.state.showAnalysisRequestDialog} onClose={function(){this.setState({showAnalysisRequestDialog: false});}.bind(this)} />
++		       	 <NewCreateAnnotationsDialog alertCallback={this.props.alertCallback} dataSetId={this.state.selectedAsset} show={this.state.showCreateAnnotationsDialog} onClose={function(){this.setState({showCreateAnnotationsDialog: false});}.bind(this)} />
++		         These are all data sets in the metadata repository that are available for analysis.
++		         <p/>
++		         <Panel collapsible expanded={this.state.showDataFiles} header={
++		        		 <div style={{textAlign:"right"}}>
++				         	<span style={{float: "left"}}>Data Files</span>
++				         	<Button bsStyle="primary" onClick={function(){this.loadDataFiles();}.bind(this)}>
++				         		Refresh
++				         	</Button>
++			         		<Button onClick={this.showHideDataFiles}>
++			         			<Glyphicon glyph={this.state.showHideDataFilesIcon} />
++			         		</Button>
++			         	</div>}>
++		            	<ODFBrowser ref="dataFileBrowser" type={"DataFiles"} selection={this.state.selection} actions={actions} assets={this.state.dataFileAssets} />
++		         </Panel>
++		         <Panel collapsible expanded={this.state.showTables} header={
++		        		 <div style={{textAlign:"right"}}>
++				         	<span style={{float: "left"}}>Relational Tables</span>
++				         	<Button bsStyle="primary" onClick={function(){this.loadTables();}.bind(this)}>
++				         		Refresh
++				         	</Button>
++			         		<Button onClick={this.showHideTables}>
++			         			<Glyphicon glyph={this.state.showHideTablesIcon} />
++			         		</Button>
++			         	</div>}>
++		            	<ODFBrowser ref="tableBrowser" type={"Tables"} actions={actions} assets={this.state.tableAssets} />
++		         </Panel>
++		         <Panel collapsible expanded={this.state.showDocuments}  header={
++		        		 <div style={{textAlign:"right"}}>
++		        		 	<span style={{float: "left"}}>Documents</span>
++		        		 	<Button bsStyle="primary" onClick={function(){this.loadDocuments();}.bind(this)}>
++		        		 		Refresh
++		        		 	</Button>
++		        		 	<Button onClick={this.showHideDocuments}>
++			         			<Glyphicon glyph={this.state.showHideDocumentsIcon} />
++			         		</Button>
++			         	</div>}>
++		     			<ODFBrowser ref="docBrowser" type={"Documents"} actions={actions} assets={this.state.docAssets}/>
++		         </Panel>
++		       </div>
++		    </div>
++		     );
++  }
++
++});
++
++
++////////////////////////////////////////////////////////////////////////
++// main component
++var ODFUI = React.createClass({
++
++   componentDidMount: function() {
++	   $(window).bind("hashchange", this.parseUrl);
++	   this.parseUrl();
++   },
++
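++   // The URL hash selects the page to show: "#<navKey>[/<addition>...]", e.g. "#data/<data set id>"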
++   parseUrl : function(){
++	  var target = constants_ODFNavBar.gettingStarted;
++	  var navAddition = null;
++	  var hash = document.location.hash;
++	  if(hash && hash.length > 1){
++		  hash = hash.split("#")[1];
++		  var split = hash.split("/");
++		  var navHash = split[0];
++		  if(split.length > 0){
++			  navAddition = split.slice(1);
++		  }
++		  if(constants_ODFNavBar[navHash]){
++			  target = constants_ODFNavBar[navHash];
++		  }
++	  }
++	  this.setState({
++		  activeNavBarItem: target,
++	      navAddition: navAddition}
++	  );
++  },
++
++  getInitialState: function() {
++	  return ({
++	      activeNavBarItem: constants_ODFNavBar.gettingStarted,
++	      navAddition: null,
++	      globalAlert: {
++	        type: "",
++	        message: ""
++	      }
++	  });
++  },
++
++  handleNavBarSelection: function(selection) {
++	  $.each(constants_ODFNavBar, function(key, ref){
++		  if(ref == selection){
++			  document.location.hash = key;
++		  }
++	  });
++    this.setState({ activeNavBarItem: selection });
++  },
++
++  handleAlert: function(alertInfo) {
++    this.setState({ globalAlert: alertInfo });
++  },
++
++  render: function() {
++    var alertComp = null;
++    if (this.state.globalAlert.type != "") {
++       alertComp = <Alert bsStyle={this.state.globalAlert.type}>{this.state.globalAlert.message}</Alert>;
++    }
++
++    var contentComponent = <GettingStartedPage alertCallback={this.handleAlert}/>;
++    if (this.state.activeNavBarItem == constants_ODFNavBar.configuration) {
++       contentComponent = <ConfigurationPage alertCallback={this.handleAlert}/>;
++    } else if (this.state.activeNavBarItem == constants_ODFNavBar.discoveryServices) {
++       contentComponent = <DiscoveryServicesPage alertCallback={this.handleAlert}/>;
++    } else if (this.state.activeNavBarItem == constants_ODFNavBar.monitor) {
++       contentComponent = <MonitorPage alertCallback={this.handleAlert}/>;
++    } else if (this.state.activeNavBarItem == constants_ODFNavBar.analysis) {
++       contentComponent = <AnalysisRequestsPage navAddition={this.state.navAddition} alertCallback={this.handleAlert}/>;
++    } else if (this.state.activeNavBarItem == constants_ODFNavBar.data) {
++       contentComponent = <AnalysisDataSetsPage navAddition={this.state.navAddition} alertCallback={this.handleAlert}/>;
++    }
++
++    var divStyle = {
++      marginLeft: "80px",
++      marginRight: "80px"
++    };
++
++    return (
++        <div>
++           <ODFNavBar activeKey={this.state.activeNavBarItem} selectCallback={this.handleNavBarSelection}></ODFNavBar>
++           <div style={divStyle}>
++              {alertComp}
++              {contentComponent}
++           </div>
++        </div>
++    );
++  }
++});
++
++var div = $("#odf-toplevel-div")[0];
++ReactDOM.render(<ODFUI/>, div);
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-globals.js b/odf/odf-web/src/main/webapp/scripts/odf-globals.js
+new file mode 100755
+index 0000000..d67a2d3
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-globals.js
+@@ -0,0 +1,54 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery"); // needed by getPathValue below
++
++const CONTEXT_ROOT = ""; // window.location.origin + "/" + (window.location.pathname.split("/")[1].length > 0 ? window.location.pathname.split("/")[1] + "/" : "");
++const API_PATH = "odf/api/v1/"; // assumption: base path of the ODF REST API (API_PATH was referenced but never defined)
++const API_PREFIX = CONTEXT_ROOT + API_PATH;
++const SERVICES_URL = API_PREFIX + "services";
++const ANALYSIS_URL = API_PREFIX + "analyses";
++const ENGINE_URL = API_PREFIX + "engine";
++const CONFIG_URL = API_PREFIX + "config";
++const METADATA_URL = API_PREFIX + "metadata";
++const IMPORT_URL = API_PREFIX + "import";
++const ANNOTATIONS_URL = API_PREFIX + "annotations";
++
++var OdfUrls = {
++	"contextRoot": CONTEXT_ROOT,
++	"apiPrefix": API_PREFIX,
++	"servicesUrl": SERVICES_URL,
++	"analysisUrl": ANALYSIS_URL,
++	"engineUrl": ENGINE_URL,
++	"configUrl": CONFIG_URL,
++	"metadataUrl": METADATA_URL,
++	"importUrl": IMPORT_URL,
++	"annotationsUrl": ANNOTATIONS_URL,
++
++	getPathValue: function(obj, path) {
++	    var value = obj;
++        $.each(path.split("."),
++            function(propKey, prop) {
++               // if value is null, do nothing
++               if (value) {
++                   if(value[prop] != null){
++                       value = value[prop];
++                   } else {
++                       value = null;
++                   }
++               }
++           }
++        );
++        return value;
++	}
++};
++
++module.exports = OdfUrls;
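++
++// Example: OdfUrls.getPathValue({request: {id: "4711"}}, "request.id") returns "4711";
++// if any segment along the path is missing, null is returned instead.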
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-logs.js b/odf/odf-web/src/main/webapp/scripts/odf-logs.js
+new file mode 100755
+index 0000000..ecca602
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-logs.js
+@@ -0,0 +1,83 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ReactDOM = require("react-dom");
++var d3 = require("d3");
++var ReactBootstrap = require("react-bootstrap");
++var ReactD3 = require("react-d3-components");
++var ODFGlobals = require("./odf-globals.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var Input = ReactBootstrap.Input;
++
++var REFRESH_DELAY = 5000;
++
++var ODFLogViewer = React.createClass({
++	mixins : [AJAXCleanupMixin],
++
++	getInitialState : function(){
++		return {logLevel : "ALL", log : ""};
++	},
++
++	getLogs : function() {
++		const url = ODFGlobals.engineUrl + "/log?numberOfLogs=50&logLevel=" + this.state.logLevel;
++        var req = $.ajax({
++            url: url,
++            contentType: "text/plain",
++            type: 'GET',
++            success: function(data) {
++               this.setState({log: data});
++            }.bind(this),
++            error: function(xhr, status, err) {
++              var msg = "ODF log request failed, " + err.toString();
++              this.props.alertCallback({type: "danger", message: msg});
++            }.bind(this)
++        });
++
++        this.storeAbort(req.abort);
++	},
++
++	componentWillMount : function() {
++		this.getLogs();
++	},
++
++	componentWillUnmount () {
++	    this.refreshInterval && clearInterval(this.refreshInterval);
++	    this.refreshInterval = false;
++	},
++
++	componentWillReceiveProps: function(nextProps){
++		if(!nextProps.visible){
++			 this.refreshInterval && clearInterval(this.refreshInterval);
++			 this.refreshInterval = false;
++		}else if(!this.refreshInterval){
++			this.refreshInterval = window.setInterval(this.getLogs, REFRESH_DELAY);
++		}
++	},
++	render : function(){
++		return (<div>
++					<h4>ODF system logs</h4>
++					<h5>(This only works for the node this web application is running on; logs from other ODF nodes in a clustered environment will not be displayed.)</h5>
++					<Input label="Log level:" type="select" onChange={(el) => {this.setState({logLevel : el.target.value}, this.getLogs);}} value={this.state.logLevel}>
++					<option value="ALL">ALL</option>
++					<option value="FINE">FINE</option>
++					<option value="INFO">INFO</option>
++					<option value="WARNING">WARNING</option>
++				</Input>
++				<textarea readOnly style={{width: '100%', height: '700px'}} value={this.state.log} /></div>);
++	}
++});
++
++module.exports = ODFLogViewer;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js b/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js
+new file mode 100755
+index 0000000..d7072dd
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js
+@@ -0,0 +1,661 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ReactBootstrap = require("react-bootstrap");
++
++var Panel = ReactBootstrap.Panel;
++var Table = ReactBootstrap.Table;
++var Label = ReactBootstrap.Label;
++var Image = ReactBootstrap.Image;
++var Modal = ReactBootstrap.Modal;
++var Button = ReactBootstrap.Button;
++var FormControls = ReactBootstrap.FormControls;
++var ListGroup = ReactBootstrap.ListGroup;
++var ListGroupItem = ReactBootstrap.ListGroupItem;
++
++var ODFGlobals = require("./odf-globals.js");
++var UISpec = require("./odf-ui-spec.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var Utils = require("./odf-utils.js");
++var AtlasHelper = Utils.AtlasHelper;
++var URLHelper = Utils.URLHelper;
++
++var ODFBrowser = {
++
++	//set rowReferences property and pass an array of atlas references {id : ..., repositoryId: ...}, these rows will then be fetched
++	//or set rowAssets property and pass an array of data that is supposed to be displayed as is
++	ODFPagingTable : React.createClass({
++
++		mixins : [AJAXCleanupMixin],
++
++		getInitialState : function(){
++			var pageSize = (this.props.pageSize ? this.props.pageSize : 5);
++			var rowReferences = this.props.rowReferences;
++			var rowAssets = this.props.rowAssets;
++			var max = (rowReferences ? rowReferences.length : (rowAssets ? rowAssets.length : 0));
++			var pageRows = (rowAssets ? rowAssets.slice(0, pageSize) : null);
++
++			return {
++				pageRows : pageRows,
++				pageSize : pageSize,
++				max : max,
++				tablePage : 0,
++				rowReferenceLoadingAborts : []
++			};
++		},
++
++		componentDidMount : function() {
++			if(this.props.rowReferences){
++				var pagerowReferences = this.props.rowReferences.slice(0, this.state.pageSize);
++				this.loadRows(pagerowReferences);
++			}
++		},
++
++		componentWillReceiveProps : function(nextProps){
++			if(!this.isMounted()){
++				return;
++			}
++			this.setStateFromProps(nextProps);
++		},
++
++		setStateFromProps : function(nextProps){
++			if(nextProps.rowReferences && !Utils.arraysEqual(this.props.rowReferences, nextProps.rowReferences)){
++				this.setState({max: nextProps.rowReferences.length, tablePage: 0});
++				var pagerowReferences = nextProps.rowReferences.slice(0, this.state.pageSize);
++				this.loadRows(pagerowReferences);
++			}else if(nextProps.rowAssets && !Utils.arraysEqual(this.props.rowAssets, nextProps.rowAssets)){
++				var rows = nextProps.rowAssets.slice(0, this.state.pageSize);
++				this.setState({pageRows : rows, max: nextProps.rowAssets.length, tablePage: 0});
++			}
++		},
++
++		getType : function(){
++			if(this.props.assetType){
++				return this.props.assetType;
++			}else if(this.state.pageRows && this.state.pageRows.length > 0 && this.state.pageRows[0].type){
++				return this.state.pageRows[0].type;
++			}
++		},
++
++		getUISpec : function(){
++			if(this.props.spec){
++				return this.props.spec;
++			}
++			return UISpec[this.getType()];
++		},
++
++		loadRows : function(rowReferences){
++			$.each(this.state.rowReferenceLoadingAborts, function(key, abort){
++				if(abort && abort.call){
++					abort.call();
++				}
++			});
++
++			this.setState({pageRows: [], rowReferenceLoadingAborts: []});
++
++			var reqs = AtlasHelper.loadAtlasAssets(rowReferences,
++				function(rowAsset){
++					var rowData = this.state.pageRows;
++					rowData.push(rowAsset);
++					if(this.isMounted()){
++						this.setState({pageRows : rowData});
++					}
++					if(rowReferences && rowData && rowData.length == rowReferences.length && this.props.onLoad){
++						this.props.onLoad(rowData);
++					}
++				}.bind(this),
++				function(err){
++
++				}
++			);
++			// collect the abort functions of all pending requests so they can be cancelled later
++			var aborts = [];
++			$.each(reqs, function(key, val){
++				aborts.push(val.abort);
++			});
++			this.setState({rowReferenceLoadingAborts: aborts});
++
++			this.storeAbort(aborts);
++		},
++
++		previousPage : function(){
++			if(this.state.tablePage > -1){
++				var tablePage = this.state.tablePage - 1;
++				this.setState({tablePage : tablePage});
++				if(this.props.rowAssets){
++					var rows = this.props.rowAssets.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
++					this.setState({pageRows : rows});
++				}else if(this.props.rowReferences){
++					var rowRefs = this.props.rowReferences.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
++					this.loadRows(rowRefs);
++				}
++			}
++		},
++
++		nextPage : function(){
++			var max = this.state.max;
++			if((this.state.tablePage * this.state.pageSize) < max){
++				var tablePage = this.state.tablePage + 1;
++				this.setState({tablePage : tablePage});
++				if(this.props.rowAssets){
++					var rows = this.props.rowAssets.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
++					this.setState({pageRows : rows})
++				}else if(this.props.rowReferences){
++					var rows = this.props.rowReferences.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
++					this.loadRows(rows);
++				}
++			}
++		},
++
++		sortAlphabetical : function(a, b){
++			if(this.getUISpec() && this.getUISpec().attributes){
++				var attrs = this.getUISpec().attributes;
++				var sortProp = null;
++				for(var no = 0; no < attrs.length; no++){
++					if(attrs[no].sort == true){
++						sortProp = attrs[no].key;
++						var aProp = (a[sortProp] || "").toLowerCase();
++						var bProp = (b[sortProp] || "").toLowerCase();
++						return ((aProp < bProp) ? -1 : ((aProp > bProp) ? 1 : 0));
++					}
++				}
++			}
++			return 0;
++		},
++
++		onRowClick : function(rowData){
++			if(this.props.onRowClick){
++				var type = this.getType();
++				if(type){
++					//new type is singular of list type ...
++					type = type.substring(0, type.length - 1);
++				}
++				this.props.onRowClick(rowData, type);
++			}
++		},
++
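++		// Render a single cell value: arrays as their length, metadata references as
++		// links, other objects as their JSON representation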
++		parseValue : function(value){
++		    if (value) {
++		        if(Array.isArray(value)){
++		            return value.length;
++		        }else if(typeof value === "object" && value.id && value.url && value.repositoryId){
++		            return <a href={value.url}>{value.id}</a>;
++		        }else if(typeof value === "object"){
++		            return JSON.stringify(value);
++		        }
++		    }
++			return value;
++		},
++
++		render : function(){
++			var loadingImg = <Image src="img/lg_proc.gif" rounded />;
++			var contentRows = [];
++			var pageIndicator = "(0-0)";
++			if(this.state.pageRows){
++				loadingImg = null;
++				this.state.pageRows.sort(this.sortAlphabetical);
++				$.each(this.state.pageRows, function(key, rowData){
++					var displayProperties = [];
++					var icon = null;
++					if(this.getUISpec()){
++						displayProperties = this.getUISpec().attributes;
++						if(this.getUISpec().icon){
++							icon = this.getUISpec().icon;
++						}
++					}else{
++						$.each(rowData, function(propName, val){
++							var label = propName;
++							if(label && label[0]){
++								label = label[0].toUpperCase() + label.slice(1);
++							}
++							displayProperties.push({key: propName, label: label});
++						});
++					}
++
++					var colCss = {};
++					if(this.props.actions){
++						colCss.paddingTop = "26px";
++					}
++					var columns = [<td style={colCss} key={"iconCol" + key}>{icon}</td>];
++					$.each(displayProperties,
++						function(key, propObj){
++							//properties can be a path such as prop1.prop2
++					        var value = ODFGlobals.getPathValue(rowData, propObj.key);
++
++							if(propObj.func){
++								value = propObj.func(value, rowData);
++							}else{
++								value = this.parseValue(value);
++							}
++
++							var col = <td style={colCss} key={propObj.key}>{value}</td>;
++							columns.push(col);
++						}.bind(this)
++					);
++
++					if(this.props.actions){
++						var btns = [];
++						$.each(this.props.actions, function(key, obj){
++							if(obj.assetType.indexOf(this.getType()) > -1){
++									$.each(obj.actions, function(actKey, action){
++										if((action.filter && action.filter(rowData)) || !action.filter){
++											var btn = <div key={actKey}><Button onClick={function(e){e.stopPropagation(); action.func(rowData);}}>{action.label}</Button><br/></div>;
++											btns.push(btn);
++										}
++									});
++							}
++						}.bind(this));
++						columns.push(<td key={"actionBtns"}>{btns}</td>);
++					}
++
++					var rowCss = {};
++					if(this.props.onRowClick){
++						rowCss.cursor = "pointer";
++					}
++
++					var row = <tr style={rowCss} onClick={function(){this.onRowClick(rowData);}.bind(this)} key={key}>
++								{columns}
++							  </tr>;
++					contentRows.push(row);
++				}.bind(this));
++
++				var max = this.state.max;
++				var min = (max > 0 ? (this.state.tablePage * this.state.pageSize + 1)  : 0);
++				pageIndicator = "(" + min + "-";
++				if((this.state.tablePage + 1) * this.state.pageSize >= max){
++					pageIndicator += max + ")";
++				}else{
++					pageIndicator += (this.state.tablePage + 1) * this.state.pageSize + ")";
++				}
++			}
++
++			var header = [];
++			var lbls = [""];
++
++			if(this.getUISpec()){
++				$.each(this.getUISpec().attributes, function(key, propObj){
++					lbls.push(propObj.label);
++				});
++			}else if(this.state.pageRows && this.state.pageRows.length > 0){
++				$.each(this.state.pageRows[0], function(key, val){
++					lbls.push(key[0].toUpperCase() + key.slice(1));
++				});
++			}
++			if(this.props.actions){
++				lbls.push("Actions");
++			}
++
++			$.each(lbls, function(key, val){
++					var headerCss = null;
++					if(val == "Actions"){
++						headerCss = {paddingLeft: "38px"};
++					}
++					var th = <th style={headerCss} key={key}>{val}</th>;
++					header.push(th);
++				});
++
++			return <div style={this.props.style}>
++					<div style={{minHeight:250}}>
++					<Table responsive>
++						<thead>
++							<tr>
++								{header}
++							</tr>
++						</thead>
++						<tbody>
++							{contentRows}
++						</tbody>
++					</Table>
++					</div>
++					<Button disabled={(this.state.pageRows==null || this.state.tablePage <= 0 )} onClick={this.previousPage}>previous</Button>
++					<span>
++						{pageIndicator}
++					</span>
++					<Button disabled={(this.state.pageRows==null || (this.state.tablePage + 1) * this.state.pageSize >= this.state.max)} onClick={this.nextPage}>next</Button>
++				</div>;
++		}
++	}),
++
++	ODFAssetDetails : React.createClass({
++
++		mixins : [AJAXCleanupMixin],
++
++		onHide : function(){
++			if(this.props.onHide){
++				this.props.onHide();
++			}
++			if(this.isMounted()){
++				this.setState({show: true});
++			}
++		},
++
++		getInitialState : function(){
++			return {
++					show : true
++					};
++		},
++
++		getType : function(){
++			if(this.props.assetType){
++				return this.props.assetType;
++			}else if(this.props.asset && this.props.asset.type){
++				return this.props.asset.type;
++			}
++			return null;
++		},
++
++		getUISpec : function(asset){
++			return UISpec[this.getType()];
++		},
++
++		getPropertiesByType : function(srcObject, uiSpecAttributes){
++			var properties = [];
++			var references = [];
++			var lists = [];
++			var objects = [];
++			if (uiSpecAttributes) {
++	            var label = null;
++	            var func = null;
++	            var key = null;
++	            $.each(uiSpecAttributes, function(index, property){
++	                var value = ODFGlobals.getPathValue(srcObject, property.key);
++	                if (value) {
++	                    if(property.func){
++	                        value = property.func(value, srcObject);
++	                    }
++	                    var obj = property;
++	                    obj.value = value;
++	                    if(value && Array.isArray(value)){
++	                        lists.push(obj);
++	                    }else if(value && value.id && value.repositoryId){
++	                        references.push(obj);
++	                    }else{
++	                        properties.push(obj);
++	                    }
++	                }
++	            }.bind(this) );
++			}
++	        return {lists: lists, properties: properties, references: references, objects: objects};
++		},
++
++		sortPropsByLabelPosition : function(properties, uiSpecAttributes){
++			if(uiSpecAttributes){
++				properties.sort(function(val1, val2){
++					var index1 = -1;
++					var index2 = -1;
++					for(var no = 0; no < uiSpecAttributes.length; no++){
++						if(uiSpecAttributes[no].label == val1.label){
++							index1 = no;
++						}else if(uiSpecAttributes[no].label == val2.label){
++							index2 = no
++						}
++						if(index1 != -1 && index2 != -1){
++							break;
++						}
++					}
++					if(index1 > index2){
++						return 1;
++					}else if(index1 < index2){
++						return -1;
++					}
++					return 0;
++				});
++			}
++		},
++
++		createPropertiesJSX : function(properties){
++			var props = [];
++			$.each(properties, function(key, val){
++				var value = val.value;
++				if(value){
++					var prop = <FormControls.Static key={key} label={val.label} standalone>{val.value}</FormControls.Static>
++					props.push(prop);
++				}
++			}.bind(this));
++			return props;
++		},
++
++		createReferenceJSX : function(references){
++			var refs = [];
++			$.each(references, function(key, val){
++				var prop = <a key={key} href={val.value.url}>{val.label}</a>
++				refs.push(prop);
++			}.bind(this));
++			return refs;
++		},
++
++		createObjectJSX : function(objects){
++			var objs = [];
++			$.each(objects, function(key, val){
++				var obj = <span key={key}>{JSON.stringify(val.value)}</span>;
++				objs.push(obj);
++			}.bind(this));
++
++			return objs;
++		},
++
++		createTableJSX : function(lists){
++			var tables = [];
++			$.each(lists, function(key, val){
++				var isRemote = false;
++				var first = val.value[0];
++				var rowReferences = null;
++				var rowAssets = null;
++				if(first && first.id && first.repositoryId){
++					rowReferences = val.value;
++				}else{
++					rowAssets = val.value;
++				}
++
++				var spec = null;
++				var label = val.label.toLowerCase();
++				var type = label;
++				if(val.uiSpec){
++					spec = UISpec[val.uiSpec];
++				}else{
++					spec = UISpec[type];
++				}
++
++				var table = <div key={val.label + "_" + key}>
++								<h3>{val.label}</h3>
++								<ODFBrowser.ODFPagingTable rowAssets={rowAssets} assetType={type} rowReferences={rowReferences} onRowClick={this.props.onReferenceClick} spec={spec}/>
++							</div>;
++				tables.push(table);
++			}.bind(this));
++
++			return tables;
++		},
++
++		render : function(){
++			var loadingOverlay = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
++			if(!this.props.loading){
++				loadingOverlay = null;
++			}
++
++			var tablesPanel = <Panel collapsible defaultExpanded={false} header="References">
++	          </Panel>;
++			var propertiesPanel = <Panel collapsible defaultExpanded={false} header="Properties">
++	          </Panel>;
++
++			if(this.props.asset){
++				var uiSpec = this.getUISpec(this.props.asset);
++				var uiSpecAttrs = uiSpec ? uiSpec.attributes : null;
++				if(!uiSpecAttrs){
++					uiSpecAttrs = [];
++					$.each(this.props.asset, function(propName, val){
++						var label = propName;
++						if(label && label[0]){
++							label = label[0].toUpperCase() + label.slice(1);
++						}
++						uiSpecAttrs.push({key: propName, label: label});
++					});
++				}
++				var allProps = this.getPropertiesByType(this.props.asset, uiSpecAttrs);
++
++				var properties = allProps.properties;
++				var references = allProps.references;
++				var objects = allProps.objects;
++				var lists = allProps.lists;
++
++				var props = [];
++				var refs = [];
++				var objs = [];
++				var tables = [];
++
++				this.sortPropsByLabelPosition(properties, uiSpecAttrs);
++				props = this.createPropertiesJSX(properties);
++				refs = this.createReferenceJSX(references);
++				objs = this.createObjectJSX(objects);
++				tables = this.createTableJSX(lists);
++
++				if(props.length > 0 || refs.length > 0 || objs.length > 0){
++					propertiesPanel = <Panel collapsible defaultExpanded={true} header="Properties">
++							     		{props}
++							     		{refs}
++							     		{objs}
++							     	  </Panel>;
++				}
++
++				if(tables.length > 0){
++					tablesPanel = <Panel collapsible defaultExpanded={true} header="References">
++						     		{tables}
++						          </Panel>;
++				}
++			}
++
++			var icon = null;
++			var spec = this.getUISpec(this.props.asset);
++			if(spec && spec.icon){
++				icon = spec.icon;
++			}
++
++		    var title = <span>{icon} Details</span>;
++		    if(this.props.asset && this.props.asset.reference){
++		          title = <div>{title} <a target="_blank" href={this.props.asset.reference.url}>( {this.props.asset.reference.id} )</a></div>;
++		    }
++			return <Modal show={this.props.show} onHide={this.onHide}>
++			        	<Modal.Header closeButton>
++			           <Modal.Title>{title}</Modal.Title>
++				        </Modal.Header>
++				        <Modal.Body>
++					    	{loadingOverlay}
++					        {propertiesPanel}
++					        {tablesPanel}
++						</Modal.Body>
++				       <Modal.Footer>
++				       <Button onClick={function(){this.onHide();}.bind(this)}>Close</Button>
++				       </Modal.Footer>
++					</Modal>
++		}
++
++	}),
++
++	//Atlas Metadata browser: either pass an Atlas query in the query property in order to execute the query and display the results, or pass the assets to show directly via the assets property
++	ODFMetadataBrowser : React.createClass({
++
++		mixins : [AJAXCleanupMixin],
++
++		getInitialState : function() {
++			return ({
++				assets: null,
++				loadingAssetDetails: false
++				});
++		},
++
++		componentWillMount : function() {
++			if(this.props.selection){
++				this.loadSelectionFromAtlas(this.props.selection);
++			}
++		},
++
++		referenceClick: function(val, type){
++			if(!type || val.type){
++				type = val.type;
++			}
++			var selectedAsset = {id: val.reference.id, repositoryId: val.reference.repositoryId, type: type};
++			URLHelper.setUrlHash(selectedAsset);
++		},
++
++		loadSelectionFromAtlas : function(selection){
++			if(selection){
++				this.setState({showAssetDetails: true, loadingAssetDetails: true});
++				var sel = selection;
++				if(!sel.id){
++					sel = JSON.parse(decodeURIComponent(sel));
++				}
++
++				var loading = false;
++				if(sel.id && sel.repositoryId){
++					if(!this.state.assetDetails || !this.state.assetDetails.reference ||
++							this.state.assetDetails.reference.id != sel.id ||
++							this.state.assetDetails.reference.repositoryId != sel.repositoryId){
++						loading = true;
++						var req = AtlasHelper.loadAtlasAsset(sel,
++								function(data){
++									if(!data.type && sel.type){
++										data.type = sel.type;
++									}
++									var state = {
++											assetDetails: data,
++											loadingAssetDetails: false};
++									this.setState(state);
++								}.bind(this),
++								function(){
++
++								}
++						);
++						this.storeAbort(req.abort);
++					}
++				}
++				if(!loading && this.state.loadingAssetDetails){
++					this.setState({loadingAssetDetails: false});
++				}
++			}
++		},
++
++		componentWillReceiveProps : function(nextProps){
++			if(!this.isMounted()){
++				return;
++			}
++			var newState = {};
++			if(nextProps.selection && this.props.selection != nextProps.selection){
++				this.loadSelectionFromAtlas(nextProps.selection);
++			}else if(nextProps.selection == null){
++				newState.assetDetails = null;
++				newState.showAssetDetails = false;
++			}
++			this.setState(newState);
++		},
++
++		render : function(){
++			var loadingImg = null;
++			var list = null;
++			if(this.props.assets){
++				list = <ODFBrowser.ODFPagingTable actions={this.props.actions} rowAssets={this.props.assets} onRowClick={this.referenceClick} assetType={this.props.type}/>;
++			}else{
++				loadingImg = <Image src="img/lg_proc.gif" rounded />;
++			}
++
++			return <div>{list}
++						{loadingImg}
++						<ODFBrowser.ODFAssetDetails show={this.state.assetDetails != null || this.state.loadingAssetDetails} loading={this.state.loadingAssetDetails} key={(this.state.assetDetails ? this.state.assetDetails.id : "0")} onReferenceClick={this.referenceClick} asset={this.state.assetDetails} onHide={function(){URLHelper.setUrlHash(); this.setState({assetDetails : null})}.bind(this)} />
++					</div>;
++		}
++	})
++}
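++
++// Illustrative usage sketch (assetList and assetType are assumptions: an array
++// of asset objects and a UISpec type key provided by the caller):
++//
++//   <ODFBrowser.ODFMetadataBrowser assets={assetList} type={assetType}/>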
++
++module.exports = ODFBrowser;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-mixins.js b/odf/odf-web/src/main/webapp/scripts/odf-mixins.js
+new file mode 100755
+index 0000000..40c0aa9
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-mixins.js
+@@ -0,0 +1,51 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++
++var AJAXCleanupMixin = {
++
++	componentWillMount: function() {
++		this.requestAborts = [];
++	},
++
++	storeAborts : function(aborts) {
++		if(Array.isArray(aborts)){
++			$.each(aborts, function(key, val){
++				this.storeAbort(val);
++			}.bind(this));
++		}
++	},
++
++	storeAbort : function(abort) {
++		if(Array.isArray(abort)){
++			$.each(abort, function(key, val){
++				this.requestAborts.push(val);
++			}.bind(this));
++		}else{
++			this.requestAborts.push(abort);
++		}
++	},
++
++	componentWillUnmount : function() {
++		$.each(this.requestAborts, function(key, val){
++			if(val && val.call){
++				val.call();
++			}
++		});
++	}
++};
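++
++// Illustrative usage sketch (MyComponent is a made-up example): register the
++// mixin and hand it the abort handle of every AJAX request, so that requests
++// still in flight are cancelled when the component unmounts.
++//
++//   var MyComponent = React.createClass({
++//       mixins: [AJAXCleanupMixin],
++//       componentWillMount: function() {
++//           var req = $.ajax({ url: "/some/url" });
++//           this.storeAbort(req.abort);
++//       },
++//       render: function() { return <div/>; }
++//   });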
++
++module.exports = AJAXCleanupMixin;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-notifications.js b/odf/odf-web/src/main/webapp/scripts/odf-notifications.js
+new file mode 100755
+index 0000000..a3b99ce
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-notifications.js
+@@ -0,0 +1,171 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ReactDOM = require("react-dom");
++var d3 = require("d3");
++var ReactBootstrap = require("react-bootstrap");
++var ReactD3 = require("react-d3-components");
++var ODFGlobals = require("./odf-globals.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var LineChart = ReactD3.LineChart;
++var Input = ReactBootstrap.Input;
++var Image = ReactBootstrap.Image;
++
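++// Interval in milliseconds between two polls of the notification count.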
++var REFRESH_DELAY = 5000;
++
++var CurrentNotificationsGraph = React.createClass({
++
++	tooltipLine : function(label, data) {
++        return "Arrived notifications " + data.y;
++    },
++
++	render : function(){
++		var lineChart = null;
++
++		if(this.props.values){
++			var data = [
++			        {
++			        	label: 'Asset notifications',
++			            values: [ ]
++			         }
++			    ];
++
++			for(var no = 0; no < this.props.values.length; no++){
++				data[0].values.push({x : no + 1, y : this.props.values[no]});
++			}
++
++			lineChart = (<LineChart
++			                data={data}
++							width={400}
++			                height={400}
++			                margin={{top: 10, bottom: 50, left: 50, right: 10}}
++			                tooltipContained
++		                    tooltipHtml={this.tooltipLine}
++			                shapeColor={"red"}
++			 				xAxis={{tickValues: []}}
++							/>);
++		}
++
++		return (
++				<div>
++					<h4>Number of received notifications</h4>
++					<h5>(This only works for the node this web application is running on.  In a clustered environment, notifications could be processed on another node and therefore not be visible here)</h5>
++
++					{lineChart}
++				</div>);
++	}
++
++
++});
++
++var ODFNotificationsGraph = React.createClass({
++	mixins : [AJAXCleanupMixin],
++
++	getInitialState : function(){
++		return {notifications : [], notificationCount : [0]};
++	},
++
++	getNotifications : function(){
++		const url = ODFGlobals.metadataUrl + "/notifications?numberOfNotifications=50";
++        var req = $.ajax({
++            url: url,
++            contentType: "application/json",
++            type: 'GET',
++            success: function(data) {
++            	this.setState({notifications: data.notifications});
++            }.bind(this),
++            error: function(xhr, status, err) {
++              var msg = "ODF notification request failed, " + err.toString();
++              this.props.alertCallback({type: "danger", message: msg});
++            }.bind(this)
++        });
++
++        this.storeAbort(req.abort);
++	},
++
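++	// Polls the notification count and keeps a sliding window of the last ten
++	// samples; when the two most recent samples differ, new notifications have
++	// arrived and the notification list is re-fetched.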
++	getNotificationCount : function() {
++		const url = ODFGlobals.metadataUrl + "/notifications/count";
++        var req = $.ajax({
++            url: url,
++            contentType: "application/json",
++            type: 'GET',
++            success: function(data) {
++            	var current = this.state.notificationCount;
++            	if(!current){
++            		current = [];
++            	}else if(current.length > 1 && current[current.length - 1] != current[current.length - 2]){
++            		this.getNotifications();
++            	}
++            	if(current.length == 10){
++            		current.splice(0, 1);
++            	}
++            	current.push(data.notificationCount);
++               this.setState({notificationCount: current});
++            }.bind(this),
++            error: function(xhr, status, err) {
++              var msg = "ODF notification count request failed, " + err.toString();
++              this.props.alertCallback({type: "danger", message: msg});
++            }.bind(this)
++        });
++
++        this.storeAbort(req.abort);
++	},
++
++	componentWillMount : function() {
++		this.getNotifications();
++		this.getNotificationCount();
++	},
++
++	componentWillUnmount () {
++	    this.refreshInterval && clearInterval(this.refreshInterval);
++	    this.refreshInterval = false;
++	},
++
++	componentWillReceiveProps: function(nextProps){
++		if(!nextProps.visible){
++			 this.refreshInterval && clearInterval(this.refreshInterval);
++			 this.refreshInterval = false;
++		}else if(!this.refreshInterval){
++			this.refreshInterval = window.setInterval(this.getNotificationCount, REFRESH_DELAY);
++		}
++	},
++	render : function(){
++		var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
++
++		var notificationGraph = null;
++		if(this.state){
++			progressIndicator = null;
++			notificationGraph = <CurrentNotificationsGraph values={this.state.notificationCount} />;
++		}
++
++		var notificationsValue = "";
++		$.each(this.state.notifications, function(key, val){
++			notificationsValue +="\n";
++			notificationsValue += val.type + " , " + val.asset.repositoryId + " -- " + val.asset.id;
++		});
++
++		return (
++				<div>
++					{progressIndicator}
++					{notificationGraph}
++					<textarea disabled style={{width: '100%', height: '300px'}} value={notificationsValue} />
++				</div>);
++
++	}
++});
++
++module.exports = ODFNotificationsGraph;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js b/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js
+new file mode 100755
+index 0000000..55c053b
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js
+@@ -0,0 +1,154 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ReactBootstrap = require("react-bootstrap");
++
++var ODFAssetDetails = require("./odf-metadata-browser.js").ODFAssetDetails;
++var ODFPagingTable = require("./odf-metadata-browser.js").ODFPagingTable;
++var ODFGlobals = require("./odf-globals.js");
++var AtlasHelper = require("./odf-utils.js").AtlasHelper;
++var URLHelper = require("./odf-utils.js").URLHelper;
++var AJAXCleanupMixin = require("./odf-mixins.js");
++
++var Image = ReactBootstrap.Image;
++
++var ODFRequestBrowser = React.createClass({
++
++	mixins : [AJAXCleanupMixin],
++
++	getInitialState : function(){
++		return {assetDetails : null, loadingAssetDetails: false};
++	},
++
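++	// Resolves a discovery service id to its display name via the
++	// registeredServices prop; falls back to the raw id if unknown.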
++	getDiscoveryServiceNameFromId(id) {
++		if(!this.props.registeredServices){
++			return id;
++		}
++		var servicesWithSameId = this.props.registeredServices.filter(
++	         function(dsreg) {
++	             return dsreg.id == id;
++	         }
++		);
++		if (servicesWithSameId.length > 0) {
++			return servicesWithSameId[0].name;
++		}
++		return id;
++	},
++
++	loadSelectedRequestStatus: function(requestId){
++		if(requestId){
++			this.setState({showAssetDetails: true, loadingAssetDetails: true});
++
++			var req = $.ajax({
++	            url: ODFGlobals.analysisUrl + "/" + requestId,
++	            contentType: "application/json",
++	            dataType: 'json',
++	            type: 'GET',
++	            success: function(data) {
++	            	$.each(data.serviceRequests, function(key, request){
++	            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
++	            		request.discoveryServiceName = serviceName;
++            		}.bind(this));
++
++	               this.setState({assetType: "request", assetDetails: data, loadingAssetDetails: false});
++	            }.bind(this),
++	            error: function(data){
++	            	this.setState({loadingAssetDetails: false});
++	            }.bind(this)
++	        });
++		    this.storeAbort(req.abort);
++
++			if(this.state.loadingAssetDetails){
++				this.setState({loadingAssetDetails: false});
++			}
++		}
++	},
++
++	loadSelectionFromAtlas : function(selection){
++		if(selection){
++			this.setState({showAssetDetails: true, loadingAssetDetails: true});
++			var sel = selection;
++			if(!sel.id){
++				sel = JSON.parse(decodeURIComponent(sel));
++			}
++
++			var loading = false;
++			if(sel.id && sel.repositoryId){
++			if(!this.state.assetDetails || !this.state.assetDetails.reference ||
++					this.state.assetDetails.reference.id != sel.id ||
++					this.state.assetDetails.reference.repositoryId != sel.repositoryId){
++						loading = true;
++						var req = AtlasHelper.loadAtlasAsset(sel,
++							function(data){
++								if(!data.type && sel.type){
++									data.type = sel.type;
++								}
++								var state = {
++										assetDetails: data, assetType: data.type, loadingAssetDetails: false};
++								this.setState(state);
++							}.bind(this),
++							function(){
++
++							}
++						);
++					    this.storeAbort(req.abort);
++				}
++			}
++
++			if(!loading && this.state.loadingAssetDetails){
++				this.setState({loadingAssetDetails: false});
++			}
++		}
++	},
++
++	componentWillReceiveProps : function(nextProps){
++		if(!this.isMounted()){
++			return;
++		}
++		var newState = {};
++		if((nextProps.selection && this.props.selection && this.props.selection.id != nextProps.selection.id) || (nextProps.selection && this.props.selection == null)){
++			if(nextProps.selection.id && nextProps.selection.repositoryId){
++				this.loadSelectionFromAtlas(nextProps.selection);
++			}else{
++				this.loadSelectedRequestStatus(nextProps.selection);
++			}
++		}else if(nextProps.selection == null){
++			newState.assetDetails = null;
++		}
++		this.setState(newState);
++	},
++
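++	// A clicked row is either a metadata asset (identified by reference id and
++	// repository id) or an analysis request (identified by request id); the
++	// selection is encoded into the URL hash accordingly.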
++	rowClick : function(val, type){
++		if(!type || val.type){
++			type = val.type;
++		}
++		if(val && val.reference && val.reference.id){
++			var selectedAsset = {id: val.reference.id, repositoryId: val.reference.repositoryId, type: type};
++			URLHelper.setUrlHash(JSON.stringify(selectedAsset));
++		}else if(val && val.request && val.request.id){
++			URLHelper.setUrlHash(JSON.stringify({requestId : val.request.id}));
++		}
++	},
++
++	render : function(){
++		return <div>
++					<ODFPagingTable actions={this.props.actions} rowAssets={this.props.assets} onRowClick={this.rowClick} assetType="requests"/>
++					<ODFAssetDetails show={this.state.assetDetails != null || this.state.loadingAssetDetails} loading={this.state.loadingAssetDetails} key={(this.state.assetDetails ? this.state.assetDetails.id : "0")} onReferenceClick={this.rowClick} asset={this.state.assetDetails} assetType={this.state.assetType} onHide={function(){URLHelper.setUrlHash(); this.setState({showAssetDetails : false})}.bind(this)} />
++				</div>;
++	}
++});
++
++module.exports = ODFRequestBrowser;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-services.js b/odf/odf-web/src/main/webapp/scripts/odf-services.js
+new file mode 100755
+index 0000000..cf5314b
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-services.js
+@@ -0,0 +1,251 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++//js imports
++var $ = require("jquery");
++var bootstrap = require("bootstrap");
++
++var React = require("react");
++var ReactDOM = require("react-dom");
++var LinkedStateMixin = require("react-addons-linked-state-mixin");
++var ReactBootstrap = require("react-bootstrap");
++
++var ODFGlobals = require("./odf-globals.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var configurationStore = require("./odf-utils.js").ConfigurationStore;
++var servicesStore = require("./odf-utils.js").ServicesStore;
++
++var Button = ReactBootstrap.Button;
++var Jumbotron = ReactBootstrap.Jumbotron;
++var Grid = ReactBootstrap.Grid;
++var Row = ReactBootstrap.Row;
++var Col = ReactBootstrap.Col;
++var Table = ReactBootstrap.Table;
++var Modal = ReactBootstrap.Modal;
++var Input = ReactBootstrap.Input;
++var Alert = ReactBootstrap.Alert;
++var Panel = ReactBootstrap.Panel;
++var Label = ReactBootstrap.Label;
++var Image = ReactBootstrap.Image;
++
++var DiscoveryServiceInfo = React.createClass({
++	mixins : [AJAXCleanupMixin],
++
++    testService() {
++        const url = ODFGlobals.servicesUrl + "/" + this.props.dsreg.id;
++        var req = $.ajax({
++            url: url,
++            contentType: "application/json",
++            dataType: 'json',
++            type: 'GET',
++            success: function(data) {
++                var type = "success";
++                if (data.status != "OK") {
++                    type = "danger";
++                }
++                var msg = "Status of ODF service '" + this.props.dsreg.name + "' is "+ data.status +" (" + data.message + ")";
++                this.props.alertCallback({type: type, message: msg});
++            }.bind(this),
++            error: function(xhr, status, err) {
++            	if(status != "abort" ){
++            		console.error(url, status, err.toString());
++            	}
++         	    if(this.isMounted()){
++         	    	var msg = "Service test failed: " + status + ", " + err.toString();
++         	    	this.props.alertCallback({type: "danger", message: msg});
++         	    }
++            }.bind(this)
++        });
++
++        this.storeAbort(req.abort);
++    },
++
++    deleteService() {
++        const url = ODFGlobals.servicesUrl + "/" + this.props.dsreg.id;
++        $.ajax({
++            url: url,
++            type: 'DELETE',
++            success: function(data) {
++            	if(this.isMounted()){
++            		this.props.refreshCallback();
++            	}
++            }.bind(this),
++            error: function(xhr, status, err) {
++            	if(status != "abort" ){
++            		console.error(url, status, err.toString());
++            	}
++            	if(this.isMounted()){
++	              var msg = "Service could not be deleted: " + status + ", " + err.toString();
++	              this.props.alertCallback({type: "danger", message: msg});
++			  }
++			}.bind(this)
++        });
++    },
++
++	render() {
++    	var icon = "";
++    	var imgUrl = this.props.dsreg.iconUrl;
++    	//urls will be used directly.
++    	if(imgUrl != null && (imgUrl.trim().startsWith("http://") || imgUrl.trim().startsWith("https://"))){
++    		icon = imgUrl;
++    	}else{
++    		icon = ODFGlobals.servicesUrl + "/" + encodeURIComponent(this.props.dsreg.id) + "/image";
++    	}
++
++    	var endpointInfo = <span>No additional information</span>;
++    	if (this.props.dsreg.endpoint.type == "Java") {
++            endpointInfo = <span><em>Java class name</em>: {this.props.dsreg.endpoint.className}</span>;
++    	}
++		return (
++				<Grid>
++				  <Row className="show-grid">
++				    <Col sm={1}>
++		             <div >
++		               <Image src={icon} rounded/>
++	   	             </div>
++		            </Col>
++		            <Col sm={4}>
++	             	  <b>{this.props.dsreg.name}</b>
++	             	  <br/>
++	             	  {this.props.dsreg.description}
++	             	  <br/>
++	             	  <a href={this.props.dsreg.link} target="_blank">More</a>
++		              </Col>
++	             	<Col sm={5}>
++	             	  <em>Type</em>: {this.props.dsreg.endpoint.type}
++	       	          <br/>
++	       	          {endpointInfo}
++	       	          <br/>
++	       	          <em>ID</em>: {this.props.dsreg.id}
++	       	          <br/>
++	       	          <em>Protocol</em>: {this.props.dsreg.protocol}
++	             	</Col>
++	             	<Col sm={2}>
++	             	  <Button bsStyle="primary" onClick={this.testService}>Test</Button>
++	             	  <br/>
++	             	 <Button bsStyle="warning" onClick={this.deleteService}>Delete</Button>
++	             	</Col>
++		         </Row>
++		      </Grid>
++		);
++	}
++});
++
++var AddDiscoveryServiceButton = React.createClass({
++  mixins: [LinkedStateMixin, AJAXCleanupMixin],
++
++  getInitialState() {
++	  return({showModal: false, serviceEndpointType: "Spark", parallelismCount: 2, serviceInterfaceType: "DataFrame"});
++  },
++
++  open() {
++    this.setState({showModal: true, errorMessage: null});
++  },
++
++  close() {
++    this.setState({showModal: false});
++  },
++
++  addService() {
++
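++	// Build the registration payload from the dialog state: strip the UI-only
++	// fields and fold the endpoint inputs into a Spark endpoint object.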
++	var newService = JSON.parse(JSON.stringify(this.state));
++	delete newService.showModal;
++	delete newService.errorMessage;
++
++	 var sparkEndpoint = {
++        jar: newService.serviceApplication,
++        className: newService.serviceClassName,
++        inputMethod: newService.serviceInterfaceType,
++        runtimeName: newService.serviceEndpointType
++	 };
++
++	newService.endpoint = sparkEndpoint;
++
++    delete newService.serviceEndpointType;
++    delete newService.serviceType;
++    delete newService.serviceApplication;
++    delete newService.serviceClassName;
++    delete newService.serviceInterfaceType;
++
++    $.ajax({
++      url: ODFGlobals.servicesUrl,
++      contentType: "application/json",
++      type: 'POST',
++      data: JSON.stringify(newService),
++      success: function(data) {
++		  if(this.isMounted()){
++			  this.close();
++			  this.props.refreshCallback();
++		  }
++      }.bind(this),
++      error: function(xhr, status, err) {
++		  if(this.isMounted()){
++			var errorMsg = status;
++			if(xhr.responseJSON && xhr.responseJSON.error){
++    				errorMsg = xhr.responseJSON.error;
++    		  	}
++		    	var msg = "Service could not be added: " + errorMsg + ", " + err.toString();
++	    	  	this.setState({errorMessage: msg});
++		  }
++      }.bind(this)
++    });
++  },
++
++  render() {
++    var alert = null;
++    if (this.state.errorMessage) {
++       alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
++    }
++
++  	var endpointInput = null;
++	endpointInput = <div>
++						<Input type="text" valueLink={this.linkState("serviceApplication")} label="Application jar (or zip) file"/>
++						<Input type="text" valueLink={this.linkState("serviceClassName")} label="Class name"/>
++			            <Input type="select" valueLink={this.linkState("serviceInterfaceType")} label="Service interface type" placeholder="DataFrame">
++			                <option value="DataFrame">DataFrame</option>
++			                <option value="Generic">Generic</option>
++		                </Input>
++	            	</div>;
++
++	  return(
++				<span>
++				<Button bsStyle="primary" bsSize="large" onClick={this.open}>Add ODF Service</Button>
++				  <Modal show={this.state.showModal} onHide={this.close}>
++						<Modal.Header closeButton>
++						 	<Modal.Title>Add ODF Service</Modal.Title>
++						</Modal.Header>
++						<Modal.Body>
++						{alert}
++						  <Input type="text" ref="serviceName"  valueLink={this.linkState("name")} label="Name"/>
++							<Input type="text" valueLink={this.linkState("description")} label="Description"/>
++							<Input type="text" valueLink={this.linkState("id")} label="ID"/>
++							<Input type="number" valueLink={this.linkState("parallelismCount")} label="Allowed parallel requests"/>
++							<Input type="select" valueLink={this.linkState("serviceEndpointType")} label="Type" placeholder="Spark">
++			                	<option value="Spark">Spark</option>
++			                </Input>
++							{endpointInput}
++							<Input type="text" valueLink={this.linkState("iconUrl")} label="Icon (Optional)"/>
++							<Input type="text" valueLink={this.linkState("link")} label="Link (Optional)"/>
++					  </Modal.Body>
++				    <Modal.Footer>
++				    <Button bsStyle="primary" onClick={this.addService}>Add</Button>
++				    <Button onClick={this.close}>Cancel</Button>
++				    </Modal.Footer>
++			     </Modal>
++				</span>
++			);
++		}
++});
++module.exports = {DiscoveryServiceInfo: DiscoveryServiceInfo, AddDiscoveryServiceButton: AddDiscoveryServiceButton};
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-settings.js b/odf/odf-web/src/main/webapp/scripts/odf-settings.js
+new file mode 100755
+index 0000000..32802c7
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-settings.js
+@@ -0,0 +1,552 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++//js imports
++var $ = require("jquery");
++var bootstrap = require("bootstrap");
++var React = require("react");
++var ReactDOM = require("react-dom");
++var LinkedStateMixin = require("react-addons-linked-state-mixin");
++var ReactBootstrap = require("react-bootstrap");
++
++var ODFGlobals = require("./odf-globals.js");
++var AJAXCleanupMixin = require("./odf-mixins.js");
++var configurationStore = require("./odf-utils.js").ConfigurationStore;
++var metadataStore = require("./odf-utils.js").MetadataStore;
++
++var Button = ReactBootstrap.Button;
++var Table = ReactBootstrap.Table;
++var Modal = ReactBootstrap.Modal;
++var Input = ReactBootstrap.Input;
++var Alert = ReactBootstrap.Alert;
++var Panel = ReactBootstrap.Panel;
++var Label = ReactBootstrap.Label;
++var Image = ReactBootstrap.Image;
++var Tabs = ReactBootstrap.Tabs;
++var Tab = ReactBootstrap.Tab;
++
++var ODFConfigPage = React.createClass({
++  mixins: [LinkedStateMixin, AJAXCleanupMixin],
++
++  getInitialState() {
++      return ({odfconfig: { odf: {} }, showDeleteConfirmationDialog: false});
++  },
++
++  componentWillMount() {
++    this.loadODFConfig();
++  },
++
++  componentWillUnmount() {
++	  this.props.alertCallback({type: ""});
++  },
++
++  // all the properties we display under the "odf" path
++  relevantODFPropList: ["instanceId", "odfUrl", "odfUser", "odfPassword", "consumeMessageHubEvents", "atlasMessagehubVcap", "runAnalysisOnImport", "runNewServicesOnRegistration"],
++
++  loadODFConfig() {
++     var req = configurationStore.readConfig(
++    	       function(data) {
++    	    	 // only "fish out" the properties we display and add them as
++    	    	 // toplevel properties to the state.
++
++    	    	 // if we have to make more complex updates this will no longer work
++    	    	 var newStateObj = {};
++    	    	 for (var i=0; i<this.relevantODFPropList.length; i++) {
++    	    		 var prop = this.relevantODFPropList[i];
++    	    		 if (data[prop]) {
++    	    			 newStateObj[prop] = data[prop];
++    	    		 }
++    	    	 }
++    	         this.setState( newStateObj );
++    	       }.bind(this),
++    	       this.props.alertCallback
++    	     );
++	 metadataStore.getProperties(
++			 function(data) {
++			     this.setState({repositoryId: data.STORE_PROPERTY_ID});
++			 }.bind(this)
++	 );
++     this.storeAbort(req.abort);
++  },
++
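++  // Counterpart of loadODFConfig: collect the displayed properties back into
++  // a config object and write them via configurationStore.updateConfig.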
++  saveODFConfig() {
++	  var newConfigObj = {};
++	  for (var i=0; i<this.relevantODFPropList.length; i++) {
++		 var prop = this.relevantODFPropList[i];
++		 if (this.state[prop] != null) {
++			 newConfigObj[prop] = this.state[prop];
++		 }
++	  }
++	  var req = configurationStore.updateConfig(newConfigObj,
++			  () => {
++					  if(this.isMounted()){
++						  this.props.alertCallback({type: "success", message: "Settings saved successfully."})
++					  }
++				  },
++			  this.props.alertCallback );
++	  this.storeAbort(req.abort);
++  },
++
++  createAtlasSampleData() {
++    this.refs.sampleDataButton.disabled = true;
++    $.ajax({
++       url: ODFGlobals.metadataUrl + "/sampledata",
++       type: 'GET',
++       success: function(data) {
++    	   if(this.isMounted()){
++	    	   this.refs.sampleDataButton.disabled = false;
++	    	   this.props.alertCallback({type: "success", message: "Sample data created successfully."});
++    	   }
++	   }.bind(this),
++       error: function(xhr, status, err) {
++    	   if(this.isMounted()){
++    		   var msg = "Sample data creation failed: " + err.toString();
++    		   this.props.alertCallback({type: "danger", message: msg});
++    		   this.refs.sampleDataButton.disabled = false;
++    	   }
++	   }.bind(this)
++     });
++  },
++
++  deleteAllAtlasData() {
++	 this.refs.deleteAllDataButton.disabled = true;
++	 $.ajax({
++	       url: ODFGlobals.metadataUrl + "/resetalldata",
++	       type: 'POST',
++	       success: function(data) {
++	    	   if(this.isMounted()){
++		    	   this.refs.deleteAllDataButton.disabled = false;
++		    	   this.props.alertCallback({type: "success", message: "All data removed!"});
++		    	   this.closeDeleteConfirmationDialog();
++	    	   }
++		   }.bind(this),
++	       error: function(xhr, status, err) {
++	    	   if(this.isMounted()){
++	    		   var msg = "Data deletion failed: " + err.toString();
++	    		   this.props.alertCallback({type: "danger", message: msg});
++	    		   this.refs.deleteAllDataButton.disabled = false;
++		    	   this.closeDeleteConfirmationDialog();
++	    	   }
++		   }.bind(this)
++	     });
++  },
++
++  openDeleteConfirmationDialog() {
++	  this.setState( { showDeleteConfirmationDialog: true} );
++  },
++
++  closeDeleteConfirmationDialog() {
++	  this.setState( { showDeleteConfirmationDialog: false} );
++  },
++
++
++  testAtlasConnection() {
++    $.ajax({
++       url: ODFGlobals.metadataUrl + "/connectiontest",
++       type: 'GET',
++       success: function(data) {
++    	   if(this.isMounted()){
++	    	   this.props.alertCallback({type: "success", message: "Connection test successful."});
++    	   }
++	   }.bind(this),
++       error: function(xhr, status, err) {
++    	   if(this.isMounted()){
++    		   var msg = "Connection test failed: " + err.toString();
++    		   this.props.alertCallback({type: "danger", message: msg});
++    	   }
++	   }.bind(this)
++     });
++  },
++
++  notificationValue() {
++      if (this.state.runAnalysisOnImport) {
++          return "create";
++      }
++      return "none";
++  },
++
++  notificationsChanged() {
++      var newValue = this.refs.runAnalysisInput.getValue();
++      var val = (newValue != "none");
++      this.setState({runAnalysisOnImport: val});
++  },
++
++  render() {
++    var divStyle = {
++      marginLeft: "20px"
++    };
++    return (
++      <div>
++      	<form>
++	      <fieldset className="form-group  label-floating">
++		        <legend>General Settings</legend>
++		        <br/>
++		        <h4>Instance</h4>
++		          <div style={divStyle}>
++		            <Input type="text" label="ODF Instance ID" valueLink={this.linkState("instanceId")} disabled/>
++		            <Input type="text" label="ODF URL" valueLink={this.linkState("odfUrl")}/>
++		            <Input type="text" label="ODF User ID" valueLink={this.linkState("odfUser")}/>
++		            <Input type="password" label="ODF Password" valueLink={this.linkState("odfPassword")}/>
++		          </div>
++		        <hr/>
++		        <h4>Metadata store</h4>
++		        <div style={divStyle}>
++		          <Input type="text" label="Repository ID" valueLink={this.linkState("repositoryId")} disabled/>
++
++		          <div style={divStyle} className="checkbox">
++		           <label>
++	                <input ref="consumeMessageHubEvents" type="checkbox" checkedLink={this.linkState("consumeMessageHubEvents")} />
++	                <span className="checkbox-material">
++	                	<span className="check"></span>
++	                </span>
++	                &nbsp;&nbsp;Consume events from Messagehub instead of a Kafka instance
++	              </label>
++	            </div>
++		          <Input type="text" label="Atlas Messagehub VCAP" disabled={(this.state && !this.state["consumeMessageHubEvents"])} valueLink={this.linkState("atlasMessagehubVcap")}/>
++		          <Button bsStyle="primary" onClick={this.testAtlasConnection}>Test connection</Button>
++		          <Button bsStyle="success" ref="sampleDataButton" onClick={this.createAtlasSampleData}>Create Atlas sample data</Button>
++		          <Button bsStyle="danger" ref="deleteAllDataButton" onClick={this.openDeleteConfirmationDialog}>Delete all Atlas data</Button>
++		          <Modal show={this.state.showDeleteConfirmationDialog} onHide={this.closeDeleteConfirmationDialog}>
++		            <Modal.Header closeButton>
++		              <Modal.Title>Confirm deletion</Modal.Title>
++		            </Modal.Header>
++		            <Modal.Body>
++		              <h4>Are you sure you want to delete all data from the metadata repository?</h4>
++		            </Modal.Body>
++		              <Modal.Footer>
++		              <Button onClick={this.deleteAllAtlasData}>Delete all Data</Button>
++		              <Button onClick={this.closeDeleteConfirmationDialog}>Close</Button>
++		            </Modal.Footer>
++		          </Modal>
++		        </div>
++		        <hr/>
++		        <h4>Notifications</h4>
++		        <div style={divStyle}>
++		          <Input type="select" label="Run analysis automatically" ref="runAnalysisInput" onChange={this.notificationsChanged} value={this.notificationValue()}>
++		             <option value="none">Never</option>
++                     <option value="create">On create</option>
++		          </Input>
++		        </div>
++		        <div style={divStyle} className="checkbox">
++		           <label>
++	                <input ref="runServicesOnRegInput" type="checkbox" checkedLink={this.linkState("runNewServicesOnRegistration")} />
++	                <span className="checkbox-material">
++	                	<span className="check"></span>
++	                </span>
++	                &nbsp;&nbsp;Automatically run all newly registered services in order to keep asset metadata up-to-date
++	              </label>
++	            </div>
++		        <hr/>
++		        <Button className="btn-raised" bsStyle="primary" onClick={this.saveODFConfig}>Save Settings</Button>
++		        <Button onClick={this.loadODFConfig}>Reload</Button>
++	        </fieldset>
++        </form>
++      </div>);
++  }
++
++});
++
++
++var SparkConfigPage = React.createClass({
++  mixins: [LinkedStateMixin, AJAXCleanupMixin],
++
++  getInitialState() {
++      return ({"clusterMasterUrl": ""});
++  },
++
++  componentWillMount() {
++    this.loadODFConfig();
++  },
++
++  componentWillUnmount() {
++	  this.props.alertCallback({type: ""});
++  },
++
++   loadODFConfig() {
++     var req = configurationStore.readConfig(
++    	       function(data) {
++    	    	 var sparkConfig = {};
++    	    	 if(data.sparkConfig != null){
++    	    		 sparkConfig = data.sparkConfig;
++    	    	 }
++    	    	 this.setState(sparkConfig);
++    	       }.bind(this),
++    	       this.props.alertCallback
++    	     );
++     this.storeAbort(req.abort);
++  },
++
++  saveODFConfig() {
++	  var sparkConfig = {clusterMasterUrl: this.state.clusterMasterUrl};
++	  var req = configurationStore.updateConfig({"sparkConfig" : sparkConfig},
++			  () => {
++					  if(this.isMounted()){
++						  this.props.alertCallback({type: "success", message: "Spark config saved successfully."})
++					  }
++				  },
++			  this.props.alertCallback );
++	  this.storeAbort(req.abort);
++  },
++
++  render() {
++    var divStyle = {
++      marginLeft: "20px"
++    };
++
++	var sparkSettings =  <div>
++						<h4>Local spark cluster</h4>
++		    			<div style={divStyle}>
++						  <Input type="text" label="Cluster master url" valueLink={this.linkState("clusterMasterUrl")}/>
++						</div>
++					 </div>;
++
++
++    return (
++      <div>
++      	<form>
++	      <fieldset className="form-group  label-floating">
++		        <legend>Spark configuration</legend>
++		        	{sparkSettings}
++		        <Button className="btn-raised" bsStyle="primary" onClick={this.saveODFConfig}>Save Settings</Button>
++		        <Button onClick={this.loadODFConfig}>Reload</Button>
++	        </fieldset>
++        </form>
++      </div>);
++  }
++
++});
++
++
++var PropertyAddButton = React.createClass({
++  getInitialState() {
++     return ({showModal: false});
++  },
++
++  close() {
++    this.setState({ showModal: false });
++  },
++
++  save() {
++    var newPropObj = {};
++    newPropObj[this.state.name] = this.state.value;
++    var updateConfig = { userDefined: newPropObj };
++    configurationStore.updateConfig(updateConfig,
++    		() => { this.props.successCallback();
++	                this.props.alertCallback({type: "success", message: "User-defined property added successfully."})
++    		},
++    		this.props.alertCallback
++    );
++  },
++
++  saveAndClose() {
++    this.save();
++    this.close();
++  },
++
++  open() {
++    this.setState({ showModal: true });
++  },
++
++  handleTextChange() {
++    this.setState({
++          name: this.refs.inputName.getValue(),
++          value: this.refs.inputValue.getValue()
++        });
++  },
++
++  handleClick() {
++      this.open();
++  },
++
++  render: function() {
++    return (<span>
++    <Button bsStyle="primary" className="btn-raised" onClick={this.handleClick}>Add</Button>
++      <Modal show={this.state.showModal} onHide={this.close}>
++          <Modal.Header closeButton>
++             <Modal.Title>Add Property</Modal.Title>
++          </Modal.Header>
++          <Modal.Body>
++             <Input type="text" ref="inputName" label="Name" onChange={this.handleTextChange}></Input>
++             <Input type="text" ref="inputValue" label="Value" onChange={this.handleTextChange}></Input>
++         </Modal.Body>
++         <Modal.Footer>
++         <Button bsStyle="primary" onClick={this.saveAndClose}>Save</Button>
++         <Button onClick={this.close}>Cancel</Button>
++         </Modal.Footer>
++			</Modal>
++      </span>);
++  }
++
++});
++
++
++var PropertyRemoveButton = React.createClass({
++
++   handleClick() {
++      var newPropObj = {};
++      newPropObj[this.props.name] = null;
++      var updateConfig = { userDefined: newPropObj };
++      configurationStore.updateConfig(updateConfig,
++    		  () => { this.props.successCallback();
++		              this.props.alertCallback({type: "success", message: "User-defined property removed successfully."});
++    		  },
++    		  this.props.alertCallback
++      );
++   },
++
++   render() {
++     return (
++    		 <Button onClick={this.handleClick}>Remove</Button>
++    );
++   }
++
++});
++var PropertyEditButton = React.createClass({
++
++   getInitialState() {
++      return ({showModal: false});
++   },
++
++   close() {
++     this.setState({ showModal: false });
++   },
++
++   save() {
++     var newPropObj = {};
++     newPropObj[this.props.name] = this.state.value;
++     var updateConfig = { userDefined: newPropObj };
++     configurationStore.updateConfig(updateConfig,
++    		 () => { this.props.successCallback();
++    		         this.props.alertCallback({type: "success", message: "User-defined property saved successfully."})
++    		 }, this.props.alertCallback
++     );
++   },
++
++   saveAndClose() {
++     this.save();
++     this.close();
++   },
++
++   open() {
++     this.setState({ showModal: true });
++   },
++
++   handleTextChange() {
++     this.setState({
++           value: this.refs.input.getValue()
++         });
++   },
++
++   handleClick() {
++       this.open();
++   },
++
++   render: function() {
++     return (
++       <span>
++        <Button bsStyle="primary" onClick={this.handleClick}>Edit</Button>
++        <Modal show={this.state.showModal} onHide={this.close}>
++            <Modal.Header closeButton>
++               <Modal.Title>Edit Property</Modal.Title>
++            </Modal.Header>
++            <Modal.Body>
++               <h4>Enter a new value for property '{this.props.name}'</h4>
++               <Input type="text" ref="input" onChange={this.handleTextChange} defaultValue={this.props.value}></Input>
++           </Modal.Body>
++           <Modal.Footer>
++           <Button bsStyle="primary" onClick={this.saveAndClose}>Save</Button>
++           <Button onClick={this.close}>Cancel</Button>
++           </Modal.Footer>
++        </Modal>
++       </span>);
++   }
++});
++
++var UserDefinedConfigPage = React.createClass({
++   mixins : [AJAXCleanupMixin],
++
++   getInitialState: function() {
++      return {odfconfig: { userDefined: {}}};
++   },
++
++   loadUserDefConfig: function() {
++     var req = configurationStore.readConfig(
++       function(data) {
++         this.setState( {odfconfig: data} );
++       }.bind(this),
++       this.props.alertCallback
++     );
++
++     this.storeAbort(req.abort);
++   },
++
++   componentDidMount: function() {
++     this.loadUserDefConfig();
++   },
++
++   componentWillUnmount : function() {
++	  this.props.alertCallback({type: ""});
++   },
++
++   render: function() {
++     var tableContents = $.map(
++           this.state.odfconfig.userDefined,
++           function(value, name) {
++        	   if (value) {
++        		 var tdBtnFixStyle = { paddingTop : "26px"};
++
++                 return <tr key={name}>
++                          <td style={tdBtnFixStyle}>{name}</td>
++                          <td style={tdBtnFixStyle}>{value}</td>
++                          <td><PropertyEditButton name={name} value={value} successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
++                              <PropertyRemoveButton name={name} successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
++                          </td>
++                        </tr>;
++        	   }
++        	   // empty element
++        	   return null;
++           }.bind(this));
++     return (
++       <div>
++	       <form>
++		      <fieldset className="form-group  label-floating">
++		      <legend>
++			       User-defined properties
++		       </legend>
++		       <Table responsive>
++		          <thead>
++		            <tr>
++		              <th>Name</th>
++		              <th>Value</th>
++		              <th></th>
++		            </tr>
++		          </thead>
++		          <tbody>
++		             {tableContents}
++		          </tbody>
++		       </Table>
++		       <PropertyAddButton successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
++	       </fieldset>
++	       </form>
++       </div>);
++   }
++
++});
++
++
++
++module.exports = {ODFConfigPage : ODFConfigPage, SparkConfigPage : SparkConfigPage, UserDefinedConfigPage: UserDefinedConfigPage} ;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-statistics.js b/odf/odf-web/src/main/webapp/scripts/odf-statistics.js
+new file mode 100755
+index 0000000..ea0e151
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-statistics.js
+@@ -0,0 +1,413 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var d3 = require("d3");
++var ReactBootstrap = require("react-bootstrap");
++var ReactD3 = require("react-d3-components");
++
++var AJAXCleanupMixin = require("./odf-mixins.js");
++
++var Image = ReactBootstrap.Image;
++var Panel = ReactBootstrap.Panel;
++var BarChart = ReactD3.BarChart;
++var PieChart = ReactD3.PieChart;
++var LineChart = ReactD3.LineChart;
++
++var ODFGlobals = require("./odf-globals.js");
++
++const GRAPH_REFRESH_DELAY_MS = 4000;
++
++var ODFStats = {
++	CurrentThreadGraph : React.createClass({
++
++		tooltipLine : function(label, data) {
++	        return "Running threads " + data.y;
++	    },
++
++	    xScale : function() {
++	    	return "";
++	    },
++
++		render : function(){
++			var lineChart = null;
++
++			if(this.props.threadValues){
++
++				var data = [
++				        {
++				        	label: 'Thread count',
++				            values: [ ]
++				         }
++				    ];
++
++				for(var no = 0; no < this.props.threadValues.length; no++){
++					data[0].values.push({x : no + 1, y : this.props.threadValues[no]});
++				}
++
++				lineChart = <LineChart
++				                data={data}
++								width={400}
++				                height={400}
++				                margin={{top: 10, bottom: 50, left: 50, right: 10}}
++				                tooltipContained
++			                    tooltipHtml={this.tooltipLine}
++				                shapeColor={"red"}
++				 				xAxis={{tickValues: []}}
++								/>;
++			}
++
++			return (
++					<div>
++						<h4>Currently running threads in ODF</h4>
++						{lineChart}
++					</div>);
++		}
++
++
++	}),
++
++	SystemDiagnostics : React.createClass({
++		mixins : [AJAXCleanupMixin],
++
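++		// Polls the engine status and keeps a sliding window of the last five
++		// thread counts to feed the line chart.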
++		getODFStatus : function(){
++			var currentState = this.state;
++
++			const url = ODFGlobals.engineUrl + "/status";
++	        var req = $.ajax({
++	            url: url,
++	            contentType: "application/json",
++	            dataType: 'json',
++	            type: 'GET',
++	            success: function(data) {
++	            	if(currentState == null){
++	            		currentState = { threadValues : [0]};
++	            	}
++	            	currentState.threadValues.push(data.threadManagerStatus.length);
++	                if(currentState.threadValues.length > 5){
++	                	currentState.threadValues.splice(0, 1);
++	                }
++
++	            	this.setState(currentState);
++	            }.bind(this),
++	            error: function(xhr, status, err) {
++	              var msg = "ODF status request failed, " + err.toString();
++	              this.props.alertCallback({type: "danger", message: msg});
++	            }.bind(this)
++	        });
++
++	        this.storeAbort(req.abort);
++		},
++
++		componentWillMount : function() {
++			this.getODFStatus();
++		},
++
++		componentWillUnmount () {
++		    this.refreshInterval && clearInterval(this.refreshInterval);
++		    this.refreshInterval = false;
++		},
++
++		componentWillReceiveProps: function(nextProps){
++			if(!nextProps.visible){
++				 this.refreshInterval && clearInterval(this.refreshInterval);
++				 this.refreshInterval = false;
++			}else if(!this.refreshInterval){
++				this.refreshInterval = window.setInterval(this.getODFStatus, GRAPH_REFRESH_DELAY_MS);
++			}
++		},
++
++		tooltipLine : function(label, data) {
++	        return "Running threads " + data.y;
++	    },
++
++	    xScale : function() {
++	    	return "";
++	    },
++
++		render : function(){
++			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
++
++			var threadGraph = null;
++			if(this.state){
++				progressIndicator = null;
++				threadGraph = <ODFStats.CurrentThreadGraph threadValues={this.state.threadValues} />;
++			}
++
++			return (
++					<div>
++						{progressIndicator}
++						{threadGraph}
++					</div> );
++		}
++	}),
++
++	TotalAnalysisGraph : React.createClass({
++		mixins : [AJAXCleanupMixin],
++
++		getAnalysisStats : function() {
++			const url = ODFGlobals.analysisUrl + "/stats";
++	        var req = $.ajax({
++	            url: url,
++	            contentType: "application/json",
++	            dataType: 'json',
++	            type: 'GET',
++	            success: function(data) {
++	               this.setState(data);
++	            }.bind(this),
++	            error: function(xhr, status, err) {
++	              var msg = "Analysis stats request failed, " + err.toString();
++	              this.props.alertCallback({type: "danger", message: msg});
++	            }.bind(this)
++	        });
++
++	        this.storeAbort(req.abort);
++		},
++
++		componentWillMount : function() {
++			this.getAnalysisStats();
++		},
++
++		componentWillUnmount () {
++		    this.refreshInterval && clearInterval(this.refreshInterval);
++		    this.refreshInterval = false;
++		},
++
++		componentWillReceiveProps: function(nextProps){
++			if(!nextProps.visible){
++				 this.refreshInterval && clearInterval(this.refreshInterval);
++				 this.refreshInterval = false;
++			}else if(!this.refreshInterval){
++				this.refreshInterval = window.setInterval(this.getAnalysisStats, GRAPH_REFRESH_DELAY_MS);
++			}
++		},
++
++		tooltipPie : function(x, y) {
++		    return y.toString() + " absolute";
++		},
++
++		render : function() {
++			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
++			var pieChart = null;
++
++			if(this.state){
++				progressIndicator = null;
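++				// Derive success/failure percentages; with no recorded requests
++				// the chart defaults to 100% finished.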
++				var succ = (this.state.success ? this.state.success : (this.state.failure ? 0 : 100));
++				var fail = (this.state.failure ? this.state.failure : 0);
++				var onePercent = (succ + fail) / 100;
++
++				var succVal = (onePercent == 0 ? 100 : (succ / onePercent)).toFixed(2);
++				var failVal = (onePercent == 0 ? 0 : (fail / onePercent)).toFixed(2);
++
++				var pieData = {label: "Total success and failure",
++								values : [{x: "Finished requests (" + succVal + " %)", y: succ},
++								          {x: "Failed requests (" + failVal + " %)", y: fail}
++									]
++								};
++
++				var colorScale = d3.scale.ordinal().range(["lightgreen", "#F44336"]);
++
++				var pieStyle = {opacity : "1 !important"};
++				pieChart = (<PieChart
++	                    data={pieData}
++	                    width={800}
++	                    height={400}
++	                    margin={{top: 10, bottom: 10, left: 200, right: 200}}
++	                    tooltipHtml={this.tooltipPie}
++	                    tooltipOffset={{top: 175, left: 200}}
++						tooltipMode={"fixed"}
++						style={pieStyle}
++   				      	colorScale={colorScale}
++	                    />);
++			}
++			return (
++					<div>
++						<h4>Total analysis requests and failures</h4>
++			           	{progressIndicator}
++						{pieChart}
++						<hr />
++					</div>);
++		}
++	}),
++
++	PerServiceStatusGraph : React.createClass({
++		mixins : [AJAXCleanupMixin],
++
++		getServiceStatus : function() {
++			const url = ODFGlobals.servicesUrl + "/status";
++	        var req = $.ajax({
++	            url: url,
++	            contentType: "application/json",
++	            dataType: 'json',
++	            type: 'GET',
++	            success: function(data) {
++	               this.setState(data);
++	            }.bind(this),
++	            error: function(xhr, status, err) {
++	              var msg = "Service status request failed, " + err.toString();
++	              this.props.alertCallback({type: "danger", message: msg});
++	            }.bind(this)
++	        });
++
++	        this.storeAbort(req.abort);
++		},
++
++		componentWillMount : function() {
++			this.getServiceStatus();
++		},
++
++		componentWillUnmount : function() {
++		    this.refreshInterval && clearInterval(this.refreshInterval);
++		    this.refreshInterval = false;
++		},
++
++		componentWillReceiveProps: function(nextProps){
++			if(!nextProps.visible){
++				 this.refreshInterval && clearInterval(this.refreshInterval);
++				 this.refreshInterval = false;
++			}else if(!this.refreshInterval){
++				this.refreshInterval = window.setInterval(this.getServiceStatus, GRAPH_REFRESH_DELAY_MS);
++			}
++		},
++
++		tooltip : function(x, y0, y, total) {
++			var barData = this.getBarData();
++			var text = y;
++			var name = null;
++			if(barData && barData.length > 0){
++				$.map(barData, function(res){
++					$.map(res.values, function(val){
++						if(val.x == x && val.y == y){
++							name = val.fullName;
++						}
++					});
++				});
++			}
++
++			var tooltipStyle = {top : "-20px", position: "absolute", left: "-100px", "minWidth" : "350px"};
++
++			if(name == null){
++				tooltipStyle.left = 0;
++			}
++
++		    return (
++		    		<div style={tooltipStyle}>
++		    			<span>{name}, {text}</span>
++		    		</div>
++		    		);
++		},
++
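++		// Pivots the per-service status list in this.state (each entry carries
++		// a statusCountMap of status -> count) into the stacked-bar format the
++		// chart expects: one series per status, each holding {x: service label,
++		// y: count} values.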
++		getBarData : function(){
++			if(this.state && !$.isEmptyObject(this.state)){
++				var currentState = this.state;
++				var statusMap = {};
++				$.map(currentState, function(res){
++					var states = res.statusCountMap;
++					$.each(states, function(state, count){
++						var currentArr = statusMap[state];
++						if(currentArr === undefined){
++							currentArr = [];
++						}
++
++						var lbl = (res.name ? res.name : res.id);
++						//only shorten names if more than 1 bar is displayed
++						if(Object.keys(currentState).length > 1 && lbl && lbl.length > 17){
++							lbl = lbl.substring(0, 17) + "..";
++						}
++
++						currentArr.push({"x" : lbl, "y": count, "fullName" : res.name});
++						statusMap[state] = currentArr;
++					});
++				});
++
++				var barData = [];
++
++				$.each(statusMap, function(key, val){
++					barData.push({"label" : key, "values" : val});
++				});
++
++				barData = barData.reverse();
++				return barData;
++			}else{
++				return [ { "label" : "No data available", "values" : [{"x" : "No data available", "y" : 0}]}];
++			}
++		},
++
++		getLegend : function(barData, colors){
++			var lbls = [];
++			for(var no = 0; no < barData.length; no++){
++				lbls.push(<div key={no} ><span style={{color: colors[no]}}>{barData[no].label}</span><br/></div>);
++			}
++
++			return (
++					<div style={{float:"right"}}>
++						{lbls}
++					</div>
++				);
++		},
++
++		render : function() {
++			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
++			var barChart = null;
++
++			if(this.state){
++				progressIndicator = null;
++				var barData = this.getBarData();
++
++				var barStyle = {marginTop: "50px"};
++
++				barChart = (<BarChart
++				  width={400}
++		          height={400}
++		          margin={{top: 70, bottom: 50, left: 50, right: 10}}
++				  tooltipHtml={this.tooltip}
++			      tooltipMode={"element"}/>);
++
++				//cancelled, initialized, error, running, in queue, finished
++				var colors = ["black", "#F44336", "lightgreen", "blue", "lightblue", "grey"];
++				var colorScale = d3.scale.ordinal().range(colors);
++
++				if(barData != null){
++					var barWidth = (Object.keys(this.state).length >= 2 ? Object.keys(this.state).length * 200 : 400);
++
++					barChart = (
++								<div style={barStyle}>
++									{this.getLegend(barData, colors)}
++									<BarChart
++									  data={barData}
++									  width={barWidth}
++							          height={400}
++								      colorScale={colorScale}
++							          margin={{top: 30, bottom: 50, left: 50, right: 10}}
++									  tooltipHtml={this.tooltip}
++								      tooltipMode={"element"}
++									/>
++								</div>
++							);
++				}
++			}
++
++			return (
++					<div>
++						<h4>Analysis runs per service</h4>
++			           	{progressIndicator}
++						{barChart}
++					</div>);
++			}
++		})
++}
++
++module.exports = ODFStats;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js b/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js
+new file mode 100755
+index 0000000..a6bc381
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js
+@@ -0,0 +1,316 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ReactBootstrap = require("react-bootstrap");
++
++var Label = ReactBootstrap.Label;
++var ListGroup = ReactBootstrap.ListGroup;
++var ListGroupItem = ReactBootstrap.ListGroupItem;
++var Glyphicon = ReactBootstrap.Glyphicon;
++
++/*
++ * For every data type a UI specification can be created.
++ * A UI specification is an array of objects.
++ *
++ * Normally, the key of a property is used to find a matching UI spec.
++ * This can be overridden by defining the uiSpec attribute on a property object.
++ *
++ * Each object requires a key to identify the property and a label that will be displayed.
++ * In order to manipulate the value or how it is displayed, a property object can pass a function to the func attribute.
++ * This function will be called with the property value and the object as parameters.
++ *
++ * Properties with an array as their value will automatically be displayed in a grid.
++ * A UI specification that is used for a grid can have a property object with the attribute sort:true, causing the table to be sorted alphabetically on this property.
++*/
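++
++/*
++ * For illustration only (a hypothetical "sample" type, not one of the specs
++ * below): sort rows by name and format the size value through func.
++ *
++ *   sample : {
++ *     attributes: [
++ *       {key: "name", label: "Name", sort: true},
++ *       {key: "size", label: "Size", func: function(val){ return val + " bytes"; }}
++ *     ],
++ *     icon: <Glyphicon glyph="file" />
++ *   }
++ */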
++
++var UISpec = {
++
++		DefaultDocument : {
++			attributes: [
++			            {key: "name", label: "Name"},
++			            {key: "description", label: "Description"},
++			            {key: "type", label: "Type"}],
++		    icon: <Glyphicon glyph="question-sign" />
++		},
++
++		DefaultDocuments : {
++			attributes: [
++			            {key: "name", label: "Name"},
++			            {key: "description", label: "Description"},
++			            {key: "type", label: "Type"}],
++		    icon: <Glyphicon glyph="question-sign" />
++		},
++
++		Document : {
++			attributes: [{key: "reference.id", label: "ID"},
++		            {key: "name", label: "Name"},
++		            {key: "type", label: "Type"}],
++		    icon: <Glyphicon glyph="file" />
++		},
++
++		Documents : {
++			attributes: [{key: "name", label: "Name"},
++		            {key: "description", label: "Description"},
++		            {key: "columns", label: "Columns"} ,
++		            {key: "annotations", label: "Annotations",
++		            	 func: function(val){
++		            		 if(!val){
++		            			 return 0;
++		            		 }
++		            		 return val.length;
++		            	 }
++		             }],
++		   icon: <Glyphicon glyph="file" />
++		},
++
++		DataFile : {
++			attributes: [{key: "name", label: "Name"},
++			          {key: "description", label: "Description"},
++			          {key: "columns", label: "Columns"} ,
++			          {key: "annotations", label: "Annotations"}],
++			icon: <Glyphicon glyph="list-alt" />
++		},
++
++		DataFiles: {
++			attributes: [{key: "name", label: "Name"},
++		             {key: "columns", label: "Columns"} ,
++		             {key: "annotations", label: "Annotations",
++		            	 func: function(val){
++		            		 if(!val){
++		            			 return 0;
++		            		 }
++		            		 return val.length;
++		            	 }
++		             }],
++		    icon: <Glyphicon glyph="list-alt" />
++		},
++
++		Table : {
++			attributes: [{key: "schema", label: "Schema"},
++		         {key: "name", label: "Name"},
++		         {key:"description", label: "Description"},
++		         {key:"columns", label: "Columns"} ,
++		         {key: "annotations", label: "Annotations"}],
++     	    icon: <Glyphicon glyph="th" />
++		},
++
++		Tables : {
++			attributes: [ // {key: "schema", label: "Schema"},
++	          {key: "name", label: "Name"},
++		          {key: "columns", label: "Columns"} ,
++		          {key: "annotations", label: "Annotations",
++		        	  func: function(val){
++		        		  if(!val){
++		        			  return 0;
++		        		  }
++		        		  return val.length;
++		        		}
++		          }],
++     	    icon: <Glyphicon glyph="th" />
++		},
++
++		column : {
++			attributes: [{key: "name", label: "Name"},
++		          {key: "dataType", label: "Datatype"},
++		          {key: "annotations", label: "Annotations"}],
++
++		    icon: <Glyphicon glyph="th-list" />
++		},
++
++		columns : {
++			attributes: [{key: "name", label: "Name", sort: true},
++		           {key: "dataType", label: "Datatype"},
++		           {key: "annotations", label: "Annotations",
++			        	  func: function(val){
++			        		  if(!val){
++			        			  return 0;
++			        		  }
++			        		  return val.length;
++			        		}
++			          }],
++		    icon: <Glyphicon glyph="th-list" />
++		},
++
++		//InferredDataClass	AnalysisRun	AnnotationType	JavaClass	Annotations	AnnotatedObject	Reference	JsonProperties
++		annotation : {
++			attributes: [{key: "annotationType", label: "Annotation type"},
++
++		              // see InfoSphere DQ service: ColumnAnalysisTableAnnotation
++                      {key: "dataClassDistribution", label: "Data Class Distribution",
++		                  func: function(val) {
++		                      if (val) {
++		                          return <span>{JSON.stringify(val)}</span>;
++		                      }
++		                  }
++                      },
++                      // see InfoSphere DQ service: ColumnAnalysisColumnAnnotation
++		              {key: "inferredDataClass", label: "Data Class",
++		                func: function(val) {
++		                     if (val) {
++                                if(val.className){
++                                    var confidence = "";
++                                    if (val.confidenceThreshold) {
++                                        confidence = " ("+val.confidenceThreshold+")";
++                                    }
++                                    return <span>{val.className}{confidence}</span>;
++                                }
++                                return <span>{JSON.stringify(val)}</span>;
++		                     }
++                          }
++		              },
++                      {key: "qualityScore", label: "Data Quality Score"},
++
++		              // see Alchemy taxonomy service: TaxonomyDiscoveryService.TaxonomyAnnotation
++		              {key: "label", label: "Category"},
++                      {key: "score", label: "Score"},
++
++		              {key: "analysisRun", label:"Analysis"},
++		              {key: "jsonProperties", label: "Properties"}
++		              ],
++  		    icon: <Glyphicon glyph="tag" />
++		},
++
++		annotations : {
++			attributes: [{key: "annotationType", label: "Annotation type"},
++			             {key: "analysisRun", label:"Analysis"}],
++		  	icon: <Glyphicon glyph="tag" />
++		},
++
++	    request : {
++	    	attributes:[
++	               {key: "request.id", label: "Request ID"},
++	               {key: "state", label: "Status",
++	            	   func: function(val){
++	                       var statusLabel = <Label bsStyle="warning">Unknown</Label>;
++	                       if (val == "ACTIVE") {
++	                          statusLabel = <Label bsStyle="info">Active</Label>;
++	                       } else if (val== "QUEUED") {
++	                          statusLabel = <Label bsStyle="info">Queued</Label>;
++	                       } else if (val== "CANCELLED") {
++	                          statusLabel = <Label bsStyle="warning">Cancelled</Label>;
++	                       } else if (val== "FINISHED") {
++	                          statusLabel = <Label bsStyle="success">Finished</Label>;
++	                       } else if (val== "ERROR") {
++	                          statusLabel = <Label bsStyle="danger">Error</Label>;
++	                       }
++	                       return statusLabel;
++	            	   }},
++            	   {key: "request.dataSets", label: "Data sets", uiSpec: "DefaultDocuments"},
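++            	   // The three duration attributes below format a millisecond
++            	   // value as "Xh Ym Zs", e.g. 3725000 ms -> "1h 2m 5s".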
++            	   {key: "totalTimeOnQueues", label: "Total time on queues", func: function(val){
++            		   if(val){
++            			   	var x = val / 1000;
++            			    var seconds = Math.floor(x % 60);
++            			    x /= 60;
++            			    var minutes = Math.floor(x % 60);
++            			    x /= 60;
++            			    var hours = Math.floor(x % 24);
++
++            			    return hours + "h " + minutes + "m " + seconds + "s";
++            		   }
++            		   return "";
++            	   }},
++            	   {key: "totalTimeProcessing", label: "Total time processing", func: function(val){
++            		   if(val){
++            			   	var x = val / 1000;
++            			    var seconds = Math.floor(x % 60);
++            			    x /= 60;
++            			    var minutes = Math.floor(x % 60);
++            			    x /= 60;
++            			    var hours = Math.floor(x % 24);
++
++            			    return hours + "h " + minutes + "m " + seconds + "s";
++            		   }
++            		   return "";
++            	   }},
++            	   {key: "totalTimeStoringAnnotations", label: "Total time storing results", func: function(val){
++            		   if(val){
++            			   	var x = val / 1000;
++            			    var seconds = Math.floor(x % 60);
++            			    x /= 60;
++            			    var minutes = Math.floor(x % 60);
++            			    x /= 60;
++            			    var hours = Math.floor(x % 24);
++
++            			    return hours + "h " + minutes + "m " + seconds + "s";
++            		   }
++            		   return "";
++            	   }},
++	               {key: "serviceRequests", label: "Service Sequence", func: function(val, obj){
++	            	   var serviceNames = [];
++	            	   var services = [];
++	            	   for (var i=0; i<val.length; i++) {
++	                       var dsreq = val[i];
++	                       var dsName = dsreq.discoveryServiceName;
++	                       if(serviceNames.indexOf(dsName) == -1){
++	                    	   serviceNames.push(dsName);
++	                    	   services.push(<span key={dsName}>{dsName}<br/></span>);
++	                       }
++	                   }
++
++	                   return <em>{services}</em>;
++	               	}
++	               },
++	               {key: "details", label: "Status Details"}
++	               ],
++	   		  	icon: <Glyphicon glyph="play-circle" />
++	    },
++
++	    requests : {
++	    	attributes: [
++		               {key: "request.id", label: "Request ID"},
++		               {key: "status", label: "Status",
++		            	   func: function(val){
++		                       var statusLabel = <Label bsStyle="warning">Unknown</Label>;
++		                       if (val == "INITIALIZED") {
++		                          statusLabel = <Label bsStyle="info">Initialized</Label>;
++		                       } else if (val== "IN_DISCOVERY_SERVICE_QUEUE") {
++		                          statusLabel = <Label bsStyle="info">Queued</Label>;
++		                       } else if (val== "DISCOVERY_SERVICE_RUNNING") {
++		                           statusLabel = <Label bsStyle="info">Running</Label>;
++		                       } else if (val== "CANCELLED") {
++		                          statusLabel = <Label bsStyle="warning">Cancelled</Label>;
++		                       } else if (val== "FINISHED") {
++		                          statusLabel = <Label bsStyle="success">Finished</Label>;
++		                       } else if (val== "ERROR") {
++		                          statusLabel = <Label bsStyle="danger">Error</Label>;
++		                       }
++		                       return statusLabel;
++		            	   }},
++		               {key: "lastModified", label: "Last modified", func: function(val){
++		            	   return new Date(val).toLocaleString();
++		               }},
++		               {key: "discoveryServiceRequests", label: "Service sequence", func: function(val, obj){
++		            	   var serviceNames = [];
++		            	   var services = [];
++		            	   for (var i=0; i<val.length; i++) {
++		                       var dsreq = val[i];
++		                       var dsName = dsreq.discoveryServiceName;
++		                       if(serviceNames.indexOf(dsName) == -1){
++		                    	   serviceNames.push(dsName);
++		                    	   services.push(<span key={dsName}>{dsName}<br/></span>);
++		                       }
++		                   }
++
++		                   return <ListGroup>{services}</ListGroup>;
++		               }},
++		               {key: "statusDetails", label: "Status Details"}
++		               ],
++	   	icon: <Glyphicon glyph="play-circle" />
++	    }
++};
++
++module.exports = UISpec;
+diff --git a/odf/odf-web/src/main/webapp/scripts/odf-utils.js b/odf/odf-web/src/main/webapp/scripts/odf-utils.js
+new file mode 100755
+index 0000000..5684556
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/scripts/odf-utils.js
+@@ -0,0 +1,338 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var $ = require("jquery");
++var React = require("react");
++var ODFGlobals = require("./odf-globals.js");
++
++var Utils= {
++
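++	// Recursively compares two arrays (or nested objects) entry by entry:
++	// null only equals null, arrays of different length are unequal, and
++	// object-valued entries are compared by recursion.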
++	arraysEqual : function(arr1, arr2){
++		var a = arr1;
++		var b = arr2;
++		if(arr1 == null){
++			if(arr2 == null){
++				return true;
++			}
++			return false;
++		}else{
++			if(arr2 == null){
++				return false;
++			}
++		}
++
++		if(a.length != b.length){
++			return false;
++		}
++
++		var equal = true;
++		$.each(a, function(key, val){
++			if(a[key] && !b[key]){
++				equal = false;
++				return;
++			}
++			if(val && typeof val == "object"){
++				equal = this.arraysEqual(val, b[key]);
++				return;
++			}else{
++				if(val != b[key]){
++					equal = false;
++					return;
++				}
++			}
++		}.bind(this));
++		return equal;
++	},
++
++	AnnotationStoreHelper : {
++		loadAnnotationsForRequest : function(analysisRequestId, successCallback, errorCallback) {
++		    var url = ODFGlobals.annotationsUrl + "?analysisRequestId=" + analysisRequestId;
++            return $.ajax({
++               url: url,
++               type: 'GET',
++               success: function(data) {
++                   if(successCallback){
++                       successCallback(data);
++                   }
++               },
++               error: function(xhr, status, err) {
++                   if(errorCallback){
++                       errorCallback(err);
++                   }
++               }
++            });
++		}
++	},
++
++	AtlasHelper : {
++
++		loadAtlasAssets : function(assets, successCallback, errorCallback){
++			var reqs = [];
++			$.each(assets, function(key, val){
++				reqs.push(this.loadAtlasAsset(val, successCallback, errorCallback));
++			}.bind(this));
++			return reqs;
++		},
++
++		loadMostRecentAnnotations : function(asset, successCallback, errorCallback) {
++		    var url = ODFGlobals.annotationsUrl + "/newestAnnotations/" + encodeURIComponent(JSON.stringify({repositoryId: asset.repositoryId, id: asset.id}));
++            return $.ajax({
++               url: url,
++               type: 'GET',
++               success: function(data) {
++                   if(successCallback){
++                       successCallback(data);
++                   }
++               },
++               error: function(xhr, status, err) {
++                   if(errorCallback){
++                       errorCallback(err);
++                   }
++               }
++            });
++		},
++
++		loadRelationalDataSet: function(dataSet, successCallback, errorCallback) {
++			var url = ODFGlobals.metadataUrl + "/asset/" + encodeURIComponent(JSON.stringify({repositoryId: dataSet.reference.repositoryId, id: dataSet.reference.id})) + "/columns";
++			return $.ajax({
++				url: url,
++				type: 'GET',
++				error: function(xhr, status, err) {
++					if(errorCallback){
++						errorCallback(err);
++					}
++				}
++			}).then( function(cols){
++				if(!cols){
++					successCallback([]);
++					return [];
++				}
++				var requests = [];
++				var colRefs = [];
++				$.each(cols, function(key, val){
++					var req = Utils.AtlasHelper.getColAnnotations(val);
++					requests.push(req);
++					colRefs.push(val.reference);
++				}.bind(this));
++				dataSet.columns = colRefs;
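++				// $.when resolves with one argument per deferred when several are
++				// passed, but with the single value directly when there is only
++				// one; both shapes are handled below.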
++				$.when.apply(undefined, requests).done(function(){
++					var data = [];
++					if(requests.length > 1){
++						$.each(arguments, function(key, val){
++							data.push(val);
++						});
++					}else if(arguments[0]){
++						data.push(arguments[0]);
++					}
++					successCallback(data);
++				});
++				return requests;
++			});
++		},
++
++		getColAnnotations: function(asset, successCallback, errorCallback) {
++			var refid = asset.reference.id;
++		   var annotationsUrl = ODFGlobals.annotationsUrl + "?assetReference=" + encodeURIComponent(refid);
++		   return $.ajax({
++			   url: annotationsUrl,
++			   type: 'GET',
++			   success: function(annotationData) {
++				   asset.annotations = annotationData.annotations;
++				   if (successCallback) {
++					   successCallback(asset);
++				   }
++			   },
++			   error: function(xhr, status, err) {
++				   if(errorCallback){
++					   errorCallback(err);
++				   }
++			   }
++		   }).then(function(annotationData) {
++			   asset.annotations = annotationData.annotations;
++			   return asset;
++		   });
++		},
++
++		loadAtlasAsset : function(asset, successCallback, errorCallback){
++			var url = ODFGlobals.metadataUrl + "/asset/" + encodeURIComponent(JSON.stringify({repositoryId: asset.repositoryId, id: asset.id}));
++			return $.ajax({
++		       url: url,
++		       type: 'GET',
++		       error: function(xhr, status, err) {
++		    	   if(errorCallback){
++		    		   errorCallback(err);
++		    	   }
++		       }
++			}).then( function(data) {
++	    		   var refid = data.reference.id;
++	    		   var annotationsUrl = ODFGlobals.annotationsUrl + "?assetReference=" + encodeURIComponent(refid);
++	    		   return $.ajax({
++	    			  url: annotationsUrl,
++	    			  type: 'GET',
++	    			  success: function(annotationData) {
++	    				  data.annotations = annotationData.annotations;
++	    				  if (successCallback) {
++	    					  successCallback(data);
++	    				  }
++	    			  },
++	    			  error: function(xhr, status, err) {
++	    				  if(errorCallback){
++	    					  errorCallback(err);
++	    				  }
++	    			  }
++	    		   }).then(function(annotationData) {
++	     			   data.annotations = annotationData.annotations;
++	    			   return data;
++	    		   });
++			});
++		},
++
++		searchAtlasMetadata : function(query, successCallback, errorCallback) {
++			var url = ODFGlobals.metadataUrl + "/search?" + $.param({query: query});
++			var req = $.ajax({
++				url: url,
++				dataType: 'json',
++				type: 'GET',
++				success: function(data) {
++					successCallback(data);
++				},
++				error: function(xhr, status, err) {
++					console.error(url, status, err.toString());
++					var msg = "Error while searching metadata: " + err.toString();
++					errorCallback(msg);
++				}
++			});
++			return req;
++		}
++	},
++
++	MetadataStore : {
++
++		getProperties(successCallback, alertCallback) {
++			if (alertCallback) {
++				alertCallback({type: ""});
++			}
++			return $.ajax({
++				url: ODFGlobals.metadataUrl,
++				dataType: 'json',
++				type: 'GET',
++				success: successCallback,
++				error: function(xhr, status, err) {
++					if (alertCallback) {
++						var msg = "Error while reading metadata store properties: " + err.toString();
++						alertCallback({type: "danger", message: msg});
++					}
++				}
++			});
++		}
++	},
++
++	ConfigurationStore : {
++
++	   readConfig(successCallback, alertCallback) {
++	     // clear any previous alert
++		   if (alertCallback) {
++		     alertCallback({type: ""});
++		   }
++
++	     return $.ajax({
++	       url: ODFGlobals.apiPrefix + "settings",
++	       dataType: 'json',
++	       type: 'GET',
++	       success: successCallback,
++	       error: function(xhr, status, err) {
++	         if (alertCallback) {
++	            var msg = "Error while reading settings: " + err.toString();
++	            alertCallback({type: "danger", message: msg});
++	         }
++	       }
++	      });
++	   },
++
++	   updateConfig(config, successCallback, alertCallback) {
++			if (alertCallback) {
++				 alertCallback({type: ""});
++			}
++
++		    return $.ajax({
++			       url: ODFGlobals.apiPrefix + "settings",
++			       contentType: "application/json",
++			       dataType: 'json',
++			       type: 'PUT',
++			       data: JSON.stringify(config),
++			       success: successCallback,
++			       error: function(xhr, status, err) {
++			         if (alertCallback) {
++			            var msg = "Error while updating settings: " + err.toString();
++			            alertCallback({type: "danger", message: msg});
++			         }
++			       }
++		     });
++	   }
++	},
++
++	ServicesStore : {
++
++	   getServices(successCallback, alertCallback) {
++	     // clear any previous alert
++		   if (alertCallback) {
++		     alertCallback({type: ""});
++		   }
++
++	     return $.ajax({
++	       url: ODFGlobals.apiPrefix + "services",
++	       dataType: 'json',
++	       type: 'GET',
++	       success: successCallback,
++	       error: function(xhr, status, err) {
++	         if (alertCallback) {
++	            var msg = "Error while getting list of ODF services: " + err.toString();
++	            alertCallback({type: "danger", message: msg});
++	         }
++	       }
++	      });
++	   }
++	},
++
++	URLHelper : {
++
++		getBaseHash : function(){
++			var baseHash = "#" + document.location.hash.split("#")[1];
++			var split = baseHash.split("/");
++			if(split.length>0){
++				return split[0];
++			}
++			return "";
++		},
++
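++		// Appends an encoded detail segment to the current base hash; objects
++		// are JSON-stringified first. For example, setUrlHash({id: 1}) on
++		// "#requests" yields "#requests/%7B%22id%22%3A1%7D".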
++		setUrlHash : function(newAddition){
++			if(!newAddition){
++				newAddition = "";
++			}
++			if(newAddition != "" && typeof newAddition === "object"){
++				newAddition = JSON.stringify(newAddition);
++			}
++			var hash = document.location.hash;
++			var baseHash = this.getBaseHash();
++			if(!hash.startsWith(baseHash)){
++				return;
++			}
++			document.location.hash = baseHash + "/" + encodeURIComponent(newAddition);
++		}
++	}
++};
++
++module.exports = Utils;
+diff --git a/odf/odf-web/src/main/webapp/swagger/index.html b/odf/odf-web/src/main/webapp/swagger/index.html
+new file mode 100755
+index 0000000..28360b1
+--- /dev/null
++++ b/odf/odf-web/src/main/webapp/swagger/index.html
+@@ -0,0 +1,125 @@
++
++<!DOCTYPE html>
++<html>
++<head>
++  <meta charset="UTF-8">
++  <title>Swagger UI</title>
++  <link rel="icon" type="image/png" href="images/favicon-32x32.png" sizes="32x32" />
++  <link rel="icon" type="image/png" href="images/favicon-16x16.png" sizes="16x16" />
++  <link href='css/typography.css' media='screen' rel='stylesheet' type='text/css'/>
++  <link href='css/reset.css' media='screen' rel='stylesheet' type='text/css'/>
++  <link href='css/screen.css' media='screen' rel='stylesheet' type='text/css'/>
++  <link href='css/reset.css' media='print' rel='stylesheet' type='text/css'/>
++  <link href='css/print.css' media='print' rel='stylesheet' type='text/css'/>
++  <script src='lib/jquery-1.8.0.min.js' type='text/javascript'></script>
++  <script src='lib/jquery.slideto.min.js' type='text/javascript'></script>
++  <script src='lib/jquery.wiggle.min.js' type='text/javascript'></script>
++  <script src='lib/jquery.ba-bbq.min.js' type='text/javascript'></script>
++  <script src='lib/handlebars-2.0.0.js' type='text/javascript'></script>
++  <script src='lib/underscore-min.js' type='text/javascript'></script>
++  <script src='lib/backbone-min.js' type='text/javascript'></script>
++  <script src='swagger-ui.js' type='text/javascript'></script>
++  <script src='lib/highlight.7.3.pack.js' type='text/javascript'></script>
++  <script src='lib/jsoneditor.min.js' type='text/javascript'></script>
++  <script src='lib/marked.js' type='text/javascript'></script>
++  <script src='lib/swagger-oauth.js' type='text/javascript'></script>
++
++  <!-- Some basic translations -->
++  <!-- <script src='lang/translator.js' type='text/javascript'></script> -->
++  <!-- <script src='lang/ru.js' type='text/javascript'></script> -->
++  <!-- <script src='lang/en.js' type='text/javascript'></script> -->
++
++  <script type="text/javascript">
++    $(function () {
++      var url = window.location.search.match(/url=([^&]+)/);
++      if (url && url.length > 1) {
++        url = decodeURIComponent(url[1]);
++      } else {
++        url = "swagger.json";
++      }
++
++      // Pre load translate...
++      if(window.SwaggerTranslator) {
++        window.SwaggerTranslator.translate();
++      }
++      window.swaggerUi = new SwaggerUi({
++        url: url,
++        validatorUrl: null,
++        dom_id: "swagger-ui-container",
++        supportedSubmitMethods: ['get', 'post', 'put', 'delete', 'patch'],
++        onComplete: function(swaggerApi, swaggerUi){
++          if(typeof initOAuth == "function") {
++            initOAuth({
++              clientId: "your-client-id",
++              clientSecret: "your-client-secret-if-required",
++              realm: "your-realms",
++              appName: "your-app-name", 
++              scopeSeparator: ",",
++              additionalQueryStringParams: {}
++            });
++          }
++
++          if(window.SwaggerTranslator) {
++            window.SwaggerTranslator.translate();
++          }
++
++          $('pre code').each(function(i, e) {
++            hljs.highlightBlock(e)
++          });
++
++          addApiKeyAuthorization();
++        },
++        onFailure: function(data) {
++          log("Unable to Load SwaggerUI");
++        },
++        docExpansion: "none",
++        jsonEditor: false,
++        apisSorter: "alpha",
++        defaultModelRendering: 'schema',
++        showRequestHeaders: false
++      });
++
++      function addApiKeyAuthorization(){
++        var key = encodeURIComponent($('#input_apiKey')[0].value);
++        if(key && key.trim() != "") {
++            var apiKeyAuth = new SwaggerClient.ApiKeyAuthorization("api_key", key, "query");
++            window.swaggerUi.api.clientAuthorizations.add("api_key", apiKeyAuth);
++            log("added key " + key);
++        }
++      }
++
++      $('#input_apiKey').change(addApiKeyAuthorization);
++
++      // if you have an apiKey you would like to pre-populate on the page for demonstration purposes...
++      /*
++        var apiKey = "myApiKeyXXXX123456789";
++        $('#input_apiKey').val(apiKey);
++      */
++
++      window.swaggerUi.load();
++
++      function log() {
++        if ('console' in window) {
++          console.log.apply(console, arguments);
++        }
++      }
++    });
++  </script>
++</head>
++
++<body class="swagger-section">
++<div id='header'>
++  <div class="swagger-ui-wrap">
++    <a id="logo" href="http://swagger.io">swagger</a>
++    <form id='api_selector'>
++      <div class='input'><input placeholder="http://example.com/api" id="input_baseUrl" name="baseUrl" type="text"/></div>
++      <div class='input'><input placeholder="api_key" id="input_apiKey" name="apiKey" type="text"/></div>
++      <div class='input'><a id="explore" href="#" data-sw-translate>Explore</a></div>
++    </form>
++  </div>
++</div>
++
++<div id="message-bar" class="swagger-ui-wrap" data-sw-translate>&nbsp;</div>
++<div id="swagger-ui-container" class="swagger-ui-wrap"></div>
++</body>
++</html>
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java
+new file mode 100755
+index 0000000..6f23c0d
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java
+@@ -0,0 +1,79 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.admin;
++
++import java.io.InputStream;
++import java.util.Collection;
++
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.engine.ServiceRuntimeInfo;
++import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
++import org.apache.atlas.odf.api.engine.SystemHealth;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class EngineResourceTest extends RestTestBase {
++
++	@Test
++	public void testHealth() throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/health");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Health check request returned: " + s);
++		checkResult(httpResp, HttpStatus.SC_OK);
++		SystemHealth health = JSONUtils.fromJSON(s, SystemHealth.class);
++		Assert.assertNotNull(health);
++	}
++	
++	boolean containsRuntimeWithName(Collection<ServiceRuntimeInfo> runtimes, String name) {
++		for (ServiceRuntimeInfo sri : runtimes) {
++			if (name.equals(sri.getName())) {
++				return true;
++			}
++		}
++		return false;
++	}
++	
++	@Test
++	public void testRuntimesInfo() throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/runtimes");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Runtime Info returned: " + s);
++		checkResult(httpResp, HttpStatus.SC_OK);
++		ServiceRuntimesInfo sri = JSONUtils.fromJSON(s, ServiceRuntimesInfo.class);
++		Assert.assertNotNull(sri);
++		Assert.assertTrue(sri.getRuntimes().size() > 2);
++		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "Java"));
++		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "Spark"));
++		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "HealthCheck"));
++
++	}
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java
+new file mode 100755
+index 0000000..d093a73
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java
+@@ -0,0 +1,97 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.admin;
++
++import org.apache.atlas.odf.api.settings.MessagingConfiguration;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpStatus;
++import org.junit.Assert;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class SettingsResourceTest extends RestTestBase {
++
++	@Test
++	public void testSettingsRead() throws Exception {
++		ODFSettings settings = settingsRead();
++		Assert.assertNotNull(settings);
++		MessagingConfiguration msgConfig = settings.getMessagingConfiguration();
++		Assert.assertNotNull(msgConfig);
++		Assert.assertTrue(msgConfig instanceof KafkaMessagingConfiguration);
++		KafkaMessagingConfiguration kafkaMsgConfig = (KafkaMessagingConfiguration) msgConfig;
++		Assert.assertNotNull(kafkaMsgConfig.getKafkaConsumerConfig());
++		Assert.assertNotNull(kafkaMsgConfig.getKafkaConsumerConfig().getZookeeperConnectionTimeoutMs());
++
++		Assert.assertNotNull(settings.getUserDefined());
++	}
++
++	@Test
++	public void testPasswordEncryption() throws Exception {
++		ODFSettings settings = settingsRead();
++		settings.setOdfPassword("newOdfPassword");
++		ODFSettings configWithPlainPasswords = settings;
++		settingsWrite(JSONUtils.toJSON(configWithPlainPasswords), HttpStatus.SC_OK);
++		logger.info("Settings with plain password: " + JSONUtils.toJSON(configWithPlainPasswords));
++
++		// REST API must return hidden password
++		ODFSettings configWithHiddenPasswords = settingsRead();
++		String hiddenPasswordIdentifier = "***hidden***";
++		Assert.assertEquals(hiddenPasswordIdentifier, configWithHiddenPasswords.getOdfPassword());
++
++		// Reset passwords
++		Assert.assertNotNull(System.getProperty("odf.test.password"));
++		settings = settingsRead();
++		settings.setOdfPassword(Encryption.decryptText(System.getProperty("odf.test.password")));
++		settingsWrite(JSONUtils.toJSON(settings), HttpStatus.SC_OK);
++	}
++
++	@Test
++	public void testSettingsWriteSuccess() throws Exception {
++		String configSnippet = "{ \"runAnalysisOnImport\": false }";
++		logger.info("Testing write settings success with JSON: " + configSnippet);
++		settingsWrite(configSnippet, HttpStatus.SC_OK);
++	}
++	
++	@Test
++	public void testSettingsWriteFailure() throws Exception {
++		String configSnippet = "{ \"runAnalysisOnImport\": \"someInvalidValue\" }";
++		logger.info("Testing write settings failure with JSON: " + configSnippet);
++		settingsWrite(configSnippet, HttpStatus.SC_INTERNAL_SERVER_ERROR);
++	}
++
++	@Test
++	public void testSettingsReset() throws Exception {
++		logger.info("Testing reset settings operation.");
++		String updatedId = "updatedInstanceId";
++		ODFSettings originalConfig = settingsRead();
++		String originalInstanceId = originalConfig.getInstanceId();
++		originalConfig.setInstanceId(updatedId);
++
++		settingsWrite(JSONUtils.toJSON(originalConfig), HttpStatus.SC_OK);
++		
++		ODFSettings newConfig = settingsRead();
++		Assert.assertEquals(updatedId, newConfig.getInstanceId());
++
++		settingsReset();
++
++		ODFSettings resetConfig = settingsRead();
++		String resetInstanceId = resetConfig.getInstanceId();
++
++		Assert.assertEquals(originalInstanceId, resetInstanceId);
++	}
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java
+new file mode 100755
+index 0000000..21b7887
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java
+@@ -0,0 +1,47 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.analysis.test;
++
++import java.io.InputStream;
++
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpResponse;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Test;
++
++public class ODFVersionTest extends RestTestBase {
++
++	@Test
++	public void testVersion() throws Exception {
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/version");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Version request returned: " + s);
++
++		JSONObject jo = new JSONObject(s);
++		String version = jo.getString("version");
++		Assert.assertNotNull(version);
++		Assert.assertTrue(version.startsWith("1.2.0-"));
++	}
++
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java
+new file mode 100755
+index 0000000..4900d63
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java
+@@ -0,0 +1,174 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.annotations;
++
++import java.io.InputStream;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.UUID;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.annotation.Annotations;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.integrationtest.metadata.MetadataResourceTest;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.apache.http.entity.ContentType;
++import org.apache.wink.json4j.JSON;
++import org.apache.wink.json4j.JSONObject;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class AnnotationsResourceTest extends RestTestBase {
++	Logger logger = Logger.getLogger(AnnotationsResourceTest.class.getName());
++
++	@Before
++	public void createSampleData() throws Exception {
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		checkResult(httpResp, HttpStatus.SC_OK);
++	}
++
++	public static class AnnotationsResourceTestProfilingAnnotation extends ProfilingAnnotation {
++		private String newAnnotProp;
++
++		public String getNewAnnotProp() {
++			return newAnnotProp;
++		}
++
++		public void setNewAnnotProp(String newAnnotProp) {
++			this.newAnnotProp = newAnnotProp;
++		}
++
++	}
++
++	static String newAnnotPropValue = "newAnnotPropValue" + UUID.randomUUID().toString();
++	static String newAnnotPropKey = "newAnnotProp";
++
++	static String unknownAnnotType = "UnknownAnnotType" + UUID.randomUUID().toString();
++
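++	// Creates two annotations for the given object: one instance of the
++	// specific ProfilingAnnotation subclass defined above, and one generic
++	// ProfilingAnnotation carrying the same property in its jsonProperties
++	// payload instead.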
++	List<Annotation> createTestAnnotations(MetaDataObjectReference ref, String reqId) {
++		List<Annotation> result = new ArrayList<>();
++		AnnotationsResourceTestProfilingAnnotation annot = new AnnotationsResourceTestProfilingAnnotation();
++		annot.setProfiledObject(ref);
++		annot.setNewAnnotProp(newAnnotPropValue);
++		annot.setAnalysisRun(reqId);
++		result.add(annot);
++
++		ProfilingAnnotation genericAnnot = new ProfilingAnnotation();
++		genericAnnot.setProfiledObject(ref);
++		genericAnnot.setAnalysisRun(reqId);
++		genericAnnot.setJsonProperties("{\"" + newAnnotPropKey + "\": \"" + newAnnotPropValue + "\"}");
++		result.add(genericAnnot);
++
++		return result;
++	}
++
++	MetaDataObjectReference getTestDataSetRef() throws Exception {
++		String s = MetadataResourceTest.getAllMetadataObjectsOfType("DataFile");
++		logger.info("Retrieved test data set refs: " + s);
++		List<DataFile> dfRefs = JSONUtils.fromJSONList(s, DataFile.class);
++		return dfRefs.get(0).getReference();
++	}
++
++	@Test
++	public void testAnnotationStore() throws Exception {
++		MetaDataObjectReference dfRef = getTestDataSetRef();
++		String reqId = "TestRequestId" + UUID.randomUUID().toString();
++		logger.info("Testing annotation store with request ID: " + reqId);
++		List<Annotation> newAnnots = createTestAnnotations(dfRef, reqId);
++
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		List<String> createdAnnotIds = new ArrayList<>();
++		// create annotations
++		for (Annotation annot : newAnnots) {
++			String restRequestBody = JSONUtils.toJSON(annot);
++			logger.info("Creating annotation via request " + restRequestBody);
++			Request req = Request.Post(getBaseURI() + "/annotations").bodyString(restRequestBody, ContentType.APPLICATION_JSON);
++			Response resp = exec.execute(req);
++			HttpResponse httpResp = resp.returnResponse();
++			checkResult(httpResp, HttpStatus.SC_CREATED);
++			InputStream is = httpResp.getEntity().getContent();
++			MetaDataObjectReference createdAnnot = JSONUtils.fromJSON(is, MetaDataObjectReference.class);
++			Assert.assertNotNull(createdAnnot);
++			Assert.assertNotNull(createdAnnot.getId());
++			createdAnnotIds.add(createdAnnot.getId());
++		}
++		logger.info("Annotations created, now retrieving them again: " + createdAnnotIds);
++
++		// check retrieval
++		Request req = Request.Get(getBaseURI() + "/annotations?assetReference=" + dfRef.getId());
++		Response resp = exec.execute(req);
++
++		HttpResponse httpResp = resp.returnResponse();
++		checkResult(httpResp, HttpStatus.SC_OK);
++		Annotations retrieveResult = JSONUtils.fromJSON(httpResp.getEntity().getContent(), Annotations.class);
++		List<Annotation> retrievedAnnots = retrieveResult.getAnnotations();
++		logger.info("Retrieved annotations: " + retrievedAnnots);
++		int foundAnnots = 0;
++		for (Annotation retrievedAnnot : retrievedAnnots) {
++			logger.info("Checking annotation: " + retrievedAnnot.getReference());
++			logger.info("Annotation " + retrievedAnnot.getReference().getId() + " has request ID: " + retrievedAnnot.getAnalysisRun());
++			if (reqId.equals(retrievedAnnot.getAnalysisRun())) {
++				logger.info("Checking annotation " + retrievedAnnot + " of class " + retrievedAnnot.getClass());
++				Assert.assertTrue(retrievedAnnot instanceof ProfilingAnnotation);
++
++				if (retrievedAnnot instanceof AnnotationsResourceTestProfilingAnnotation) {
++					AnnotationsResourceTestProfilingAnnotation tpa = (AnnotationsResourceTestProfilingAnnotation) retrievedAnnot;
++					Assert.assertEquals(dfRef, tpa.getProfiledObject());
++					Assert.assertEquals(newAnnotPropValue, tpa.getNewAnnotProp());
++				} else {
++					// other annotations are "unknown", thus no subclass of ProfilingAnnotation
++					Assert.assertTrue(retrievedAnnot.getClass().equals(ProfilingAnnotation.class));
++					
++					String jsonProps = retrievedAnnot.getJsonProperties();
++					Assert.assertNotNull(jsonProps);
++					JSONObject jo = (JSONObject) JSON.parse(jsonProps);
++					Assert.assertTrue(jo.containsKey(newAnnotPropKey));
++					Assert.assertEquals(newAnnotPropValue, jo.getString(newAnnotPropKey));
++				}
++				Assert.assertTrue(createdAnnotIds.contains(retrievedAnnot.getReference().getId()));
++				foundAnnots++;
++				
++				// check that retrieval by Id works
++				logger.info("Retrieving annotation " + retrievedAnnot.getReference().getId() + " again");
++				String url = getBaseURI() + "/annotations/objects/" + retrievedAnnot.getReference().getId();
++				logger.info("Retrieving annotation with URL: " + url);
++				Request req1 = Request.Get(url);
++				Response resp1 = exec.execute(req1);
++
++				HttpResponse httpResp1 = resp1.returnResponse();
++				checkResult(httpResp1, HttpStatus.SC_OK);
++				Annotation newRetrievedAnnot = JSONUtils.fromJSON(httpResp1.getEntity().getContent(), Annotation.class);
++				Assert.assertEquals(retrievedAnnot.getReference(), newRetrievedAnnot.getReference());
++				Assert.assertEquals(retrievedAnnot.getClass(), newRetrievedAnnot.getClass());
++				Assert.assertEquals(retrievedAnnot.getJsonProperties(), newRetrievedAnnot.getJsonProperties());
++			}
++		}
++		Assert.assertEquals(createdAnnotIds.size(), foundAnnots);
++
++	}
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java
+new file mode 100755
+index 0000000..d76a272
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java
+@@ -0,0 +1,81 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.metadata;
++
++import java.io.InputStream;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.apache.http.client.utils.URIBuilder;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.api.metadata.models.BusinessTerm;
++import org.apache.atlas.odf.api.metadata.models.DataFile;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class MetadataResourceTest extends RestTestBase {
++
++	static Logger logger = Logger.getLogger(MetadataResourceTest.class.getName());
++
++	@Before
++	public void createSampleData() throws Exception {
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		checkResult(httpResp, HttpStatus.SC_OK);
++	}
++
++	public static String getAllMetadataObjectsOfType(String dataType) throws Exception {
++		MetadataStore mdsForQueryGeneration = new ODFFactory().create().getMetadataStore();
++		String query = mdsForQueryGeneration.newQueryBuilder().objectType(dataType).build();
++		logger.info("Metadata search query metadata " + query);
++
++		URIBuilder builder = new URIBuilder(getBaseURI() + "/metadata/search").addParameter("query", query);
++		String uri = builder.build().toString();
++		logger.info("Searching against URL: " + uri);
++		Request req = Request.Get(uri);
++		Response response = getRestClientManager().getAuthenticatedExecutor().execute(req);
++		HttpResponse httpResp = response.returnResponse();
++		Assert.assertEquals(HttpStatus.SC_OK, httpResp.getStatusLine().getStatusCode());
++		InputStream is = httpResp.getEntity().getContent();
++		String s = JSONUtils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Response: " + s);
++		return s;
++	}
++
++	@Test
++	public void testMetadataResourceSearchOMDataFile() throws Exception {
++		String s = getAllMetadataObjectsOfType("DataFile");
++		Assert.assertTrue(s.contains("DataFile")); // minimal checking that JSON contains something useful and specific to this type
++		JSONUtils.fromJSONList(s, DataFile.class);
++	}
++
++	@Test
++	public void testMetadataResourceSearchOMBusinessTerm() throws Exception {
++		String s = getAllMetadataObjectsOfType("BusinessTerm");
++		Assert.assertTrue(s.contains("BusinessTerm")); // minimal checking that JSON contains something useful and specific to this type
++		JSONUtils.fromJSONList(s, BusinessTerm.class);
++	}
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java
+new file mode 100755
+index 0000000..c70c500
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java
+@@ -0,0 +1,97 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.metadata;
++
++import java.net.URISyntaxException;
++import java.util.Properties;
++
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.core.integrationtest.metadata.MetadataStoreTestBase;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
++import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.MetadataStoreException;
++import org.apache.atlas.odf.api.metadata.RemoteMetadataStore;
++
++public class RemoteMetadataStoreTest extends MetadataStoreTestBase {
++
++	protected MetadataStore getMetadataStore() {
++		RemoteMetadataStore rms = null;
++		try {
++			rms = new RemoteMetadataStore(RestTestBase.getOdfBaseUrl(), RestTestBase.getOdfUser(), Encryption.decryptText(RestTestBase.getOdfPassword()), true);
++		} catch (MetadataStoreException | URISyntaxException e) {
++			throw new RuntimeException("Error connecting to remote metadata store,", e);
++		}
++		return rms;
++	}
++
++	//TODO: Remove all methods below this comment once the DefaultMetadataStore is queue-based (issue #122)
++	// RemoteMetadataStore will then use the exact same test cases as the other (writable) metadata stores 
++
++	@Before
++	public void createSampleData() {
++		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
++		MetadataStore mds = getMetadataStore();
++		mds.resetAllData();
++		mds.createSampleData();
++	}
++
++	@Test
++	public void testProperties() throws Exception {
++		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
++		RemoteMetadataStore rms = new RemoteMetadataStore(RestTestBase.getOdfBaseUrl(), RestTestBase.getOdfUser(), Encryption.decryptText(RestTestBase.getOdfPassword()), true);
++		Properties props = rms.getProperties();
++		Assert.assertNotNull(props);
++		Assert.assertTrue(!props.isEmpty());
++	}
++
++	@Test
++	public void testReferences() throws Exception {
++		//TODO: Do not overwrite original method once DefaultMetadataStore is queue-based
++		MetadataStore mds = getMetadataStore();
++		MetadataStoreTestBase.checkReferences(mds, MetadataStoreTestBase.getDataFileTestObject(mds));
++	}
++
++	@Test
++	public void testSearchAndRetrieve() {
++		//TODO: Do not overwrite original method once DefaultMetadataStore is queue-based
++
++		// Test retrieve
++		MetadataStore mds = getMetadataStore();
++		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
++		Assert.assertEquals("The metadata store did not retrieve the object with the expected name.", "BankClientsShort", mds.retrieve(bankClientsShortRef).getName());
++
++		// Test queries with conditions
++		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
++		checkQueryResults(mds, new String[] { "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.NOT_EQUALS, "BankClientsShort").build(), false);
++		checkQueryResults(mds, new String[] { "NAME" },
++				mds.newQueryBuilder().objectType("Column").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "NAME").simpleCondition("dataType", MetadataQueryBuilder.COMPARATOR.EQUALS, "string").build(), false);
++
++		// Test type hierarchy
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").build(), true);
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("RelationalDataSet").build(), true);
++		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable", "Simple URL example document", "Simple local example document" }, mds.newQueryBuilder().objectType("DataSet").build(), false);
++		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("MetaDataObject").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
++	}
++
++	@Test
++	public void testAnnotations() {
++		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
++	}
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java
+new file mode 100755
+index 0000000..d7bbc0f
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java
+@@ -0,0 +1,133 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.integrationtest.spark;
++
++import java.util.List;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.metadata.MetadataStore;
++import org.apache.atlas.odf.api.metadata.models.Annotation;
++import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.rest.test.RestTestBase;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.apache.wink.json4j.JSONException;
++import org.junit.Assert;
++import org.junit.Before;
++import org.junit.Test;
++
++import org.apache.atlas.odf.api.metadata.RemoteMetadataStore;
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.api.ODFFactory;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.annotation.AnnotationStore;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
++import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
++import org.apache.atlas.odf.core.integrationtest.metadata.internal.spark.SparkDiscoveryServiceLocalTest;
++import org.apache.atlas.odf.core.integrationtest.metadata.internal.spark.SparkDiscoveryServiceLocalTest.DATASET_TYPE;
++import org.apache.atlas.odf.api.settings.SparkConfig;
++import org.apache.atlas.odf.json.JSONUtils;
++
++public class SparkDiscoveryServiceWebTest extends RestTestBase {
++	protected static Logger logger = Logger.getLogger(SparkDiscoveryServiceWebTest.class.getName());
++
++	@Before
++	public void createSampleData() throws Exception {
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		checkResult(httpResp, HttpStatus.SC_OK);
++	}
++
++	public static DiscoveryServiceProperties getSparkSummaryStatisticsService() throws JSONException {
++		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
++		dsProperties.setId(SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
++		dsProperties.setName("Spark summary statistics service");
++		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
++		dsProperties.setIconUrl("spark.png");
++		dsProperties.setLink("http://www.spark.apache.org");
++		dsProperties.setPrerequisiteAnnotationTypes(null);
++		dsProperties.setResultingAnnotationTypes(null);
++		dsProperties.setSupportedObjectTypes(null);
++		dsProperties.setAssignedObjectTypes(null);
++		dsProperties.setAssignedObjectCandidates(null);
++		dsProperties.setParallelismCount(2);
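++		// The endpoint references the example Spark application jar, which is expected
++		// to be deployed at this path before the test runs.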
++		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
++		endpoint.setJar("file:///tmp/odf-spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
++		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
++		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
++		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
++		return dsProperties;
++	}
++
++	public void runSparkServiceTest(SparkConfig sparkConfig, DATASET_TYPE dataSetType, DiscoveryServiceProperties regInfo, String[] annotationNames) throws Exception {
++		logger.log(Level.INFO, "Testing spark application on ODF webapp url {0}.", getOdfBaseUrl());
++
++		logger.info("Using Spark configuration: " + JSONUtils.toJSON(sparkConfig));
++		ODFSettings settings = settingsRead();
++		settings.setSparkConfig(sparkConfig);
++		settings.setOdfUrl(Utils.getSystemPropertyExceptionIfMissing("odf.test.webapp.url"));
++		settingsWrite(JSONUtils.toJSON(settings), HttpStatus.SC_OK);
++
++		logger.log(Level.INFO, "Trying to delete existing discovery service: " + SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
++		deleteService(SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
++
++		logger.info("Using discovery service: " + JSONUtils.toJSON(regInfo));
++		createService(JSONUtils.toJSON(regInfo), HttpStatus.SC_OK);
++
++		checkServiceExists(regInfo.getId());
++
++		MetadataStore mds = new RemoteMetadataStore(getOdfBaseUrl(), getOdfUser(), Encryption.decryptText(getOdfPassword()), true);
++		Assert.assertNotNull(mds);
++
++		RelationalDataSet dataSet = null;
++		if (dataSetType == DATASET_TYPE.FILE) {
++			dataSet = SparkDiscoveryServiceLocalTest.getTestDataFile(mds);
++		} else if (dataSetType == DATASET_TYPE.TABLE) {
++			dataSet = SparkDiscoveryServiceLocalTest.getTestTable(mds);
++		} else {
++			Assert.fail("Unexpected data set type: " + dataSetType);
++		}
++		logger.info("Using dataset: " + JSONUtils.toJSON(dataSet));
++
++		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
++
++		AnalysisRequest request = SparkDiscoveryServiceLocalTest.getSparkAnalysisRequest(dataSet);
++		logger.info("Using analysis request: " + JSONUtils.toJSON(request));
++
++		logger.info("Starting analysis...");
++		String requestId = runAnalysis(request, State.FINISHED);
++
++		List<Annotation> annots = as.getAnnotations(null, requestId);
++		logger.info("Number of annotations created: " + annots.size());
++		Assert.assertTrue("No annotations have been created.", annots.size() > 0);
++	}
++
++	@Test
++	public void testSparkServiceRESTAPI() throws Exception {
++		runSparkServiceTest(SparkDiscoveryServiceLocalTest.getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkSummaryStatisticsService(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
++	}
++
++}
+diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java
+new file mode 100755
+index 0000000..e23dd4e
+--- /dev/null
++++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java
+@@ -0,0 +1,289 @@
++/**
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.atlas.odf.rest.test;
++
++import java.io.InputStream;
++import java.net.URI;
++import java.text.MessageFormat;
++import java.util.logging.Level;
++import java.util.logging.Logger;
++
++import org.apache.http.Header;
++import org.apache.http.HttpResponse;
++import org.apache.http.HttpStatus;
++import org.apache.http.StatusLine;
++import org.apache.http.client.fluent.Executor;
++import org.apache.http.client.fluent.Request;
++import org.apache.http.client.fluent.Response;
++import org.apache.http.client.utils.URIBuilder;
++import org.apache.http.entity.ContentType;
++import org.apache.http.message.BasicHeader;
++import org.junit.Assert;
++import org.junit.BeforeClass;
++
++import org.apache.atlas.odf.core.Encryption;
++import org.apache.atlas.odf.core.Utils;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
++import org.apache.atlas.odf.api.analysis.AnalysisRequest;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
++import org.apache.atlas.odf.api.analysis.AnalysisResponse;
++import org.apache.atlas.odf.api.annotation.Annotations;
++import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
++import org.apache.atlas.odf.api.connectivity.RESTClientManager;
++import org.apache.atlas.odf.api.settings.ODFSettings;
++import org.apache.atlas.odf.api.utils.ODFLogConfig;
++import org.apache.atlas.odf.core.test.TestEnvironment;
++import org.apache.atlas.odf.json.JSONUtils;
++
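++/**
++ * Base class for ODF REST integration tests: provides helpers for running analyses,
++ * managing discovery services, and reading and writing settings via the ODF REST API.
++ * Connection parameters are taken from the odf.test.* system properties.
++ */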
++public class RestTestBase {
++
++	protected static Logger logger = Logger.getLogger(RestTestBase.class.getName());
++
++	@BeforeClass
++	public static void setup() throws Exception {
++		ODFLogConfig.run();
++		TestEnvironment.startMessaging();
++	}
++	
++	protected static void checkResult(HttpResponse httpResponse, int expectedCode) {
++		StatusLine sl = httpResponse.getStatusLine();
++		int code = sl.getStatusCode();
++		logger.info("Http request returned: " + code + ", message: " + sl.getReasonPhrase());
++		Assert.assertEquals(expectedCode, code);
++	}
++
++	public static RESTClientManager getRestClientManager() {
++		return new RESTClientManager(URI.create(getOdfUrl()), getOdfUser(), Encryption.decryptText(getOdfPassword()));
++	}
++
++	public static String getOdfBaseUrl() {
++		String odfBaseURL = System.getProperty("odf.test.base.url");
++		return odfBaseURL;
++	}
++
++	public static String getOdfUrl() {
++		String odfURL = System.getProperty("odf.test.webapp.url");
++		return odfURL;
++	}
++
++	public static String getOdfUser() {
++		String odfUser = System.getProperty("odf.test.user");
++		return odfUser;
++	}
++
++	public static String getOdfPassword() {
++		String odfPassword = System.getProperty("odf.test.password");
++		return odfPassword;
++	}
++
++	public static String getBaseURI() {
++		return getOdfBaseUrl() + "/odf/api/v1";
++	}
++
++	public String runAnalysis(AnalysisRequest request, State expectedState) throws Exception {
++		Executor exec = getRestClientManager().getAuthenticatedExecutor();
++		String json = JSONUtils.toJSON(request);
++		logger.info("Starting analysis via POST request: " + json);
++
++		Header header = new BasicHeader("Content-Type", "application/json");
++		Request req = Request.Post(getBaseURI() + "/analyses").bodyString(json, ContentType.APPLICATION_JSON).addHeader(header);
++
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		checkResult(httpResp, HttpStatus.SC_OK);
++
++		InputStream is = httpResp.getEntity().getContent();
++		String jsonResponse = JSONUtils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Analysis response: " + jsonResponse);
++		AnalysisResponse analysisResponse = JSONUtils.fromJSON(jsonResponse, AnalysisResponse.class);
++		Assert.assertNotNull(analysisResponse);
++		String requestId = analysisResponse.getId();
++		Assert.assertNotNull(requestId);
++		logger.info("Request Id: " + requestId);
++
++		Assert.assertFalse(analysisResponse.isInvalidRequest());
++		
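++		// Poll the status endpoint once per second until the request leaves the
++		// ACTIVE/QUEUED states or the poll budget of 400 attempts is exhausted.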
++		AnalysisRequestStatus status = null;
++		int maxPolls = 400;
++		do {
++			Request statusRequest = Request.Get(getBaseURI() + "/analyses/" + requestId);
++			logger.info("Getting analysis status");
++			resp = exec.execute(statusRequest);
++			httpResp = resp.returnResponse();
++			checkResult(httpResp, HttpStatus.SC_OK);
++
++			String statusResponse = JSONUtils.getInputStreamAsString(httpResp.getEntity().getContent(), "UTF-8");
++			logger.info("Analysis status: " + statusResponse);
++			status = JSONUtils.fromJSON(statusResponse, AnalysisRequestStatus.class);
++
++			logger.log(Level.INFO, "Poll request for request ID ''{0}'' (expected state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails(), State.FINISHED });
++			maxPolls--;
++			Thread.sleep(1000);
++		} while (maxPolls > 0 && (status.getState() == State.ACTIVE || status.getState() == State.QUEUED));
++		Assert.assertEquals(State.FINISHED, status.getState());
++		return requestId;
++	}
++
++	public void createService(String serviceJSON, int expectedCode) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++
++		Request req = Request.Post(RestTestBase.getBaseURI() + "/services")//
++				.bodyString(serviceJSON, ContentType.APPLICATION_JSON) //
++		.addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Create service request return code: " + httpResp.getStatusLine().getStatusCode() + ", content: " + s);
++		checkResult(httpResp, expectedCode);
++	}
++	
++	public void checkServiceExists(String serviceId) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/services/" + serviceId).addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Get service request return code: " + httpResp.getStatusLine().getStatusCode() + ", content: " + s);
++		checkResult(httpResp, HttpStatus.SC_OK);
++	}
++
++	public void deleteService(String serviceId, int expectedCode) throws Exception {
++		checkResult(this.deleteService(serviceId), expectedCode);
++	}
++
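++	// Note: this helper removes a service by issuing a POST to /services/{id}/cancel
++	// rather than an HTTP DELETE on the service resource.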
++	public HttpResponse deleteService(String serviceId) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++		URIBuilder uri = new URIBuilder(RestTestBase.getBaseURI() + "/services/" + serviceId + "/cancel");
++		Request req = Request.Post(uri.build())//
++				.addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Delete service request returned: " + s);
++		return httpResp;
++	}
++
++	public ODFSettings settingsRead() throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/settings");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Settings read request returned: " + s);
++		is.close();
++		checkResult(httpResp, HttpStatus.SC_OK);
++		return JSONUtils.fromJSON(s, ODFSettings.class);
++	}
++
++	public void settingsWrite(String configSnippet, int expectedCode) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++
++		Request req = Request.Put(RestTestBase.getBaseURI() + "/settings")//
++				.bodyString(configSnippet, ContentType.APPLICATION_JSON) //
++		.addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Settings write request returned: " + s);
++		checkResult(httpResp, expectedCode);
++	}
++
++	public void settingsReset() throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++		Request req = Request.Post(RestTestBase.getBaseURI() + "/settings/reset")//
++		.addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Config reset request returned: " + s);
++		checkResult(httpResp, HttpStatus.SC_OK);
++	}
++
++	public void cancelAnalysisRequest(String requestId, int expectedCode) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Header header = new BasicHeader("Content-Type", "application/json");
++
++		Request req = Request.Post(RestTestBase.getBaseURI() + "/analyses/" + requestId + "/cancel").addHeader(header);
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		is.close();
++		logger.info("Cancel analyses request returned: " + s);
++		checkResult(httpResp, expectedCode);
++	}
++
++	public AnalysisRequestTrackers getAnalysesRequests(int offset, int limit) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		// Pass numbers as strings to keep MessageFormat from applying locale-specific digit grouping
++		Request req = Request.Get(MessageFormat.format("{0}/analyses?offset={1}&limit={2}", RestTestBase.getBaseURI(), Integer.toString(offset), Integer.toString(limit)));
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Analyses read request returned: " + s);
++		is.close();
++		checkResult(httpResp, HttpStatus.SC_OK);
++		return JSONUtils.fromJSON(s, AnalysisRequestTrackers.class);
++	}
++
++	public AnalysisRequestSummary getAnalysesStats() throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		Request req = Request.Get(RestTestBase.getBaseURI() + "/analyses/stats");
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Analyses statistics request returned: " + s);
++		is.close();
++		checkResult(httpResp, HttpStatus.SC_OK);
++		return JSONUtils.fromJSON(s, AnalysisRequestSummary.class);
++	}
++
++	public Annotations getAnnotations(String analysisRequestId) throws Exception {
++		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
++		URIBuilder uri = new URIBuilder(RestTestBase.getBaseURI() + "/annotations").addParameter("analysisRequestId", analysisRequestId);
++		Request req = Request.Get(uri.build());
++		Response resp = exec.execute(req);
++		HttpResponse httpResp = resp.returnResponse();
++		InputStream is = httpResp.getEntity().getContent();
++
++		String s = Utils.getInputStreamAsString(is, "UTF-8");
++		logger.info("Settings read request returned: " + s);
++		is.close();
++		checkResult(httpResp, HttpStatus.SC_OK);
++		return JSONUtils.fromJSON(s, Annotations.class);
++	}
++}
+diff --git a/odf/odf-web/webpack.config.js b/odf/odf-web/webpack.config.js
+new file mode 100755
+index 0000000..380f705
+--- /dev/null
++++ b/odf/odf-web/webpack.config.js
+@@ -0,0 +1,65 @@
++/**
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *   http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++var path = require('path');
++
++const APP_ROOT="./src/main/webapp";
++const MAIN_FILE= path.resolve(APP_ROOT + "/scripts/odf-console.js");
++const CLIENT_FILE= path.resolve(APP_ROOT + "/scripts/odf-client.js");
++
++module.exports = {
++	entry: {
++		"odf-web": MAIN_FILE,
++		"odf-client": CLIENT_FILE
++	},
++
++    output: {
++        filename: "/[name].js",
++        path: path.resolve(APP_ROOT)
++    },
++
++    module: {
++	    loaders: [
++	      {
++	        test: /\.jsx?$/,
++	        loader: 'babel',
++	        query: {
++	            presets: ['react', 'es2015']
++	        },
++	        include: /(webapp)/,
++	        exclude: /(odf-web\.js)/
++	      },
++	      {
++	    	  test: /\.(jsx|js)$/,
++	    	  loader: 'imports?jQuery=jquery,$=jquery,this=>window'
++	      },
++	      {
++	          test: /\.css$/,
++	          loader: 'style!css'
++	      },
++	      {
++	          test: /\.(png|jpg)$/,
++	          loader: 'url?limit=25000&name=resources/img/[hash].[ext]'
++	      },
++	      {
++	    	  test: /\.woff(2)?(\?v=[0-9]\.[0-9]\.[0-9])?$/,
++        	  loader: 'url-loader?limit=25000&&minetype=application/font-woff&name=resources/fonts/[hash].[ext]'
++          },
++          {
++        	  test: /\.(ttf|eot|svg)(\?v=[0-9]\.[0-9]\.[0-9])?$/,
++	          loader: 'url?limit=25000&name=resources/fonts/[hash].[ext]'
++          }
++	    ]
++    }
++}
+diff --git a/odf/pom.xml b/odf/pom.xml
+new file mode 100755
+index 0000000..2e1f263
+--- /dev/null
++++ b/odf/pom.xml
+@@ -0,0 +1,133 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
++	<modelVersion>4.0.0</modelVersion>
++	<artifactId>odf</artifactId>
++	<name>odf</name>
++	<groupId>org.apache.atlas.odf</groupId>
++	<version>1.2.0-SNAPSHOT</version>
++	<packaging>pom</packaging>
++
++
++	<modules>
++		<module>odf-api</module>
++		<module>odf-core</module>
++		<module>odf-store</module>
++		<module>odf-messaging</module>
++	</modules>
++
++	<profiles>
++		<profile>
++			<id>atlas</id>
++			<modules>
++				<module>odf-atlas</module>
++			</modules>
++		</profile>
++		<profile>
++			<id>complete-build</id>
++			<activation>
++				<property>
++					<name>reduced-build</name>
++					<value>!true</value>
++				</property>
++			</activation>
++			<modules>
++				<module>odf-spark-example-application</module>
++				<module>odf-spark</module>
++				<module>odf-doc</module>
++				<module>odf-web</module>
++				<module>odf-archetype-discoveryservice</module>
++			</modules>
++		</profile>
++		<profile>
++			<id>test-env</id>
++			<modules>
++				<module>odf-test-env</module>
++			</modules>
++		</profile>
++	</profiles>
++
++	<properties>
++		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
++		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
++		<testZookeepeConnectionString>localhost:2181</testZookeepeConnectionString>
++		<odf.test.logdir>/tmp</odf.test.logdir>
++		<odf.unittest.logspec>ALL,${odf.test.logdir}/${project.name}-unit-trace.log</odf.unittest.logspec>
++		<odf.integrationtest.logspec>ALL,${odf.test.logdir}/${project.name}-integration-trace.log</odf.integrationtest.logspec>
++		<jackson.version>2.6.5</jackson.version>
++		<jetty.maven.plugin.port>58080</jetty.maven.plugin.port>
++		<odf.test.base.url>https://localhost:${jetty.maven.plugin.port}</odf.test.base.url>
++		<odf.test.webapp.url>https://localhost:${jetty.maven.plugin.port}/odf-web-1.2.0-SNAPSHOT</odf.test.webapp.url>
++		<odf.test.user>sdp</odf.test.user>
++		<odf.test.password>ZzTeX3hKtVORgks+2TaLPWxerucPBoxK</odf.test.password>
++		<atlas.version>0.7-incubating-release</atlas.version>
++		<atlas.url>https://localhost:21443</atlas.url>
++		<atlas.user>admin</atlas.user>
++		<atlas.password>UR0+HOiApXG9B8SNpKN5ww==</atlas.password>
++	</properties>
++
++	<build>
++		<plugins>
++			<!-- make sure we are compiling for Java 1.7 -->
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-compiler-plugin</artifactId>
++				<version>2.3.2</version>
++				<configuration>
++					<source>1.7</source>
++					<target>1.7</target>
++				</configuration>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-antrun-plugin</artifactId>
++				<version>1.8</version>
++				<executions>
++					<execution>
++						<inherited>false</inherited>
++						<phase>test</phase>
++						<goals>
++							<goal>run</goal>
++						</goals>
++						<configuration>
++							<tasks>
++								<delete>
++									<fileset dir="/tmp/" includes="odf-test-execution-log.csv"/>
++								</delete>
++							</tasks>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++			<plugin>
++				<groupId>org.apache.maven.plugins</groupId>
++				<artifactId>maven-dependency-plugin</artifactId>
++				<version>2.10</version>
++				<executions>
++					<execution>
++						<id>list-dependencies</id>
++						<phase>validate</phase>
++						<goals>
++							<goal>tree</goal>
++						</goals>
++						<configuration>
++						</configuration>
++					</execution>
++				</executions>
++			</plugin>
++		</plugins>
++	</build>
++</project>
+diff --git a/odf/prepare_embedded_jetty.xml b/odf/prepare_embedded_jetty.xml
+new file mode 100755
+index 0000000..c9aa044
+--- /dev/null
++++ b/odf/prepare_embedded_jetty.xml
+@@ -0,0 +1,90 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++~
++~ Licensed under the Apache License, Version 2.0 (the "License");
++~ you may not use this file except in compliance with the License.
++~ You may obtain a copy of the License at
++~
++~   http://www.apache.org/licenses/LICENSE-2.0
++~
++~ Unless required by applicable law or agreed to in writing, software
++~ distributed under the License is distributed on an "AS IS" BASIS,
++~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++~ See the License for the specific language governing permissions and
++~ limitations under the License.
++-->
++
++<project name="prepare_embedded_jetty">
++
++	<dirname property="script.basedir" file="${ant.file.prepare_embedded_jetty}" />
++	<property name="source-dir" value="${script.basedir}/jettyconfig" />
++	<property name="download-dir" value="${script.basedir}/target/downloads/jettyconfig" />
++	<property name="target-dir" value="${script.basedir}/target/jettyconfig" />
++
++	<condition property="is-windows">
++		<os family="windows">
++		</os>
++	</condition>
++
++	<condition property="is-unix">
++		<os family="unix">
++		</os>
++	</condition>
++
++	<condition property="is-mac">
++		<os family="mac">
++		</os>
++	</condition>
++
++	<condition property="config-available">
++	   <available file="${target-dir}"/>
++    </condition>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="create-directories">
++		<mkdir dir="${download-dir}"/>
++		<mkdir dir="${target-dir}"/>
++	</target>
++
++	<target name="copy-config-files">
++		<copy todir="${target-dir}">
++			<fileset dir="${source-dir}" />
++			<fileset dir="${download-dir}" />
++		</copy>
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="download-keystore-file-windows" if="is-windows">
++		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${download-dir}/keystore.jks" />
++		<echo message="Downloaded IBM JDK keystore because we are on Windows." />
++	</target>
++
++	<target name="download-keystore-file-mac" if="is-mac">
++		<get verbose="true" src="https://ibm.box.com/shared/static/odnmhqua5sdue03z43vqsv0lp509ov70.jks" dest="${download-dir}/keystore.jks" />
++		<echo message="Downloaded OpenJDK keystore because we are on Mac." />
++	</target>
++
++	<target name="download-keystore-file-unix" if="is-unix">
++		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${download-dir}/keystore.jks" />
++		<echo message="Downloaded IBM JDK keystore because we are on UNIX (Other than Mac)." />
++	</target>
++
++	<target name="download-keystore-file" depends="download-keystore-file-unix,download-keystore-file-windows,download-keystore-file-mac">
++		<!-- keystore.jks file is stored in Box@IBM - Re-generate the file using the Java keytool -->
++		<!-- command: keytool -genkey -alias myatlas -keyalg RSA -keystore /tmp/atlas-security/keystore.jks -keysize 2048 -->
++		<!-- Note that ibm jdk uses different format than oracle/open jdk, therefore a separate version has to be generated for each jdk -->
++	</target>
++
++	<!-- ****************************************************************************************** -->
++
++	<target name="prepare-jetty-config" unless="config-available">
++		<echo message="Preparing jetty configuration..." />
++		<antcall target="create-directories" />
++		<antcall target="download-keystore-file"/>
++		<antcall target="copy-config-files"/>
++		<echo message="Jetty configuration completed." />
++	</target>
++
++</project>
+-- 
+2.10.1 (Apple Git-78)
+
diff --git a/odf/README.md b/odf/README.md
new file mode 100755
index 0000000..fbacd07
--- /dev/null
+++ b/odf/README.md
@@ -0,0 +1,20 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+Open Discovery Framework
+==========================
+
+The Open Discovery Framework (ODF) is an open metadata-based framework that strives to be a common home for different analytics technologies that discover characteristics of data sets and relationships between them (think "AppStore for discovery algorithms"). Using ODF, applications can leverage new discovery algorithms and their results with minimal integration effort.
+
+See [here](odf-doc/src/site/markdown/build.md) for instructions on how to build and deploy ODF.
diff --git a/odf/jettyconfig/jetty-https.xml b/odf/jettyconfig/jetty-https.xml
new file mode 100755
index 0000000..283e511
--- /dev/null
+++ b/odf/jettyconfig/jetty-https.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
+<Configure id="Server" class="org.eclipse.jetty.server.Server">
+	<Call id="httpsConnector" name="addConnector">
+		<Arg>
+			<New class="org.eclipse.jetty.server.ServerConnector">
+				<Arg name="server">
+					<Ref refid="Server" />
+				</Arg>
+				<Arg name="factories">
+					<Array type="org.eclipse.jetty.server.ConnectionFactory">
+						<Item>
+							<New class="org.eclipse.jetty.server.SslConnectionFactory">
+								<Arg name="next">http/1.1</Arg>
+								<Arg name="sslContextFactory">
+									<Ref refid="sslContextFactory" />
+								</Arg>
+							</New>
+						</Item>
+						<Item>
+							<New class="org.eclipse.jetty.server.HttpConnectionFactory">
+								<Arg name="config">
+									<Ref refid="sslHttpConfig" />
+								</Arg>
+							</New>
+						</Item>
+					</Array>
+				</Arg>
+				<Set name="host">
+					<Property name="jetty.host" />
+				</Set>
+				<Set name="port">
+					<Property name="jetty.maven.plugin.port" default="58080" />
+				</Set>
+				<Set name="idleTimeout">
+					<Property name="https.timeout" default="30000" />
+				</Set>
+			</New>
+		</Arg>
+	</Call>
+	<Call name="addBean">
+		<Arg>
+			<New class="org.eclipse.jetty.security.HashLoginService">
+				<Set name="name">ODF Realm</Set>
+				<Set name="config"><Property name="jetty.config.dir" default="../target/jettyconfig" />/realm.properties</Set>
+			</New>
+		</Arg>
+	</Call>
+</Configure>
diff --git a/odf/jettyconfig/jetty-ssl.xml b/odf/jettyconfig/jetty-ssl.xml
new file mode 100755
index 0000000..fb5b5e3
--- /dev/null
+++ b/odf/jettyconfig/jetty-ssl.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
+<Configure id="sslContextFactory" class="org.eclipse.jetty.util.ssl.SslContextFactory">
+	<Set name="KeyStorePath"><Property name="jetty.config.dir" default="../target/jettyconfig" />/keystore.jks</Set>
+	<Set name="KeyStorePassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
+	<Set name="KeyManagerPassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
+	<Set name="TrustStorePath"><Property name="jetty.config.dir" default="../target/jettyconfig" />/keystore.jks</Set>
+	<Set name="TrustStorePassword">OBF:20zh1zsv1kjo1lca1lf81kmy1zsv20zl</Set>
+	<Set name="EndpointIdentificationAlgorithm"></Set>
+	<Set name="ExcludeCipherSuites">
+		<Array type="String">
+			<Item>SSL_RSA_WITH_DES_CBC_SHA</Item>
+			<Item>SSL_DHE_RSA_WITH_DES_CBC_SHA</Item>
+			<Item>SSL_DHE_DSS_WITH_DES_CBC_SHA</Item>
+			<Item>SSL_RSA_EXPORT_WITH_RC4_40_MD5</Item>
+			<Item>SSL_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
+			<Item>SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
+			<Item>SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA</Item>
+		</Array>
+	</Set>
+	<New id="sslHttpConfig" class="org.eclipse.jetty.server.HttpConfiguration">
+		<Arg>
+			<Ref refid="httpConfig" />
+		</Arg>
+		<Call name="addCustomizer">
+			<Arg>
+				<New class="org.eclipse.jetty.server.SecureRequestCustomizer" />
+			</Arg>
+		</Call>
+	</New>
+</Configure>
diff --git a/odf/jettyconfig/jetty.xml b/odf/jettyconfig/jetty.xml
new file mode 100755
index 0000000..c754b48
--- /dev/null
+++ b/odf/jettyconfig/jetty.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
+<!-- ============================================================= -->
+<!-- Configure the Http Configuration -->
+<!-- ============================================================= -->
+<Configure id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration">
+	<Set name="secureScheme">https</Set>
+	<Set name="securePort"><Property name="jetty.maven.plugin.port" default="58080" /></Set>
+	<Set name="outputBufferSize">32768</Set>
+	<Set name="requestHeaderSize">8192</Set>
+	<Set name="responseHeaderSize">8192</Set>
+	<Set name="sendServerVersion">true</Set>
+	<Set name="sendDateHeader">false</Set>
+	<Set name="headerCacheSize">512</Set>
+</Configure>
diff --git a/odf/jettyconfig/realm.properties b/odf/jettyconfig/realm.properties
new file mode 100755
index 0000000..109d726
--- /dev/null
+++ b/odf/jettyconfig/realm.properties
@@ -0,0 +1,24 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Credentials for ODF basic authentication
+#
+# Format:
+# <username>: <password>[,<rolename> ...]
+#
+# Password is stored in obfuscated format.
+# Re-generate password using the org.eclipse.jetty.util.security.Password class in the jetty lib folder.
+# Example:
+# cd jetty-distribution-<version>/lib
+# java -cp jetty-util-<version>.jar org.eclipse.jetty.util.security.Password <plain password>
+sdp: OBF:1ugg1sov1xfd1k8k1wn31k5m1xfp1sov1uha,user
diff --git a/odf/odf-api/.gitignore b/odf/odf-api/.gitignore
new file mode 100755
index 0000000..2daccfd
--- /dev/null
+++ b/odf/odf-api/.gitignore
@@ -0,0 +1,18 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
diff --git a/odf/odf-api/pom.xml b/odf/odf-api/pom.xml
new file mode 100755
index 0000000..5c8258d
--- /dev/null
+++ b/odf/odf-api/pom.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-api</artifactId>
+	<properties>
+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+	</properties>
+	<dependencies>
+		<dependency>
+			<groupId>com.fasterxml.jackson.core</groupId>
+			<artifactId>jackson-annotations</artifactId>
+			<version>${jackson.version}</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>com.fasterxml.jackson.core</groupId>
+			<artifactId>jackson-databind</artifactId>
+			<version>${jackson.version}</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.wink</groupId>
+			<artifactId>wink-json4j</artifactId>
+			<version>1.4</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-csv</artifactId>
+			<version>1.2</version>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.hamcrest</groupId>
+			<artifactId>hamcrest-all</artifactId>
+			<version>1.3</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>fluent-hc</artifactId>
+			<version>4.5.1</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<artifactId>swagger-jaxrs</artifactId>
+			<version>1.5.9</version>
+			<groupId>io.swagger</groupId>
+			<scope>compile</scope>
+		</dependency>
+		<!-- The following dependencies are required by Spark Discovery Services only and are provided by the Spark cluster -->
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-core_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-sql_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+	</dependencies>
+</project>
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java
new file mode 100755
index 0000000..20676b4
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/ODFFactory.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api;
+
+import java.text.MessageFormat;
+
+public class ODFFactory {
+
+	private final static String ODF_DEFAULT_IMPLEMENTATION = "org.apache.atlas.odf.core.OpenDiscoveryFrameworkImpl";
+
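+	// The implementation class is loaded reflectively so that odf-api does not need
+	// a compile-time dependency on the odf-core module.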
+	public OpenDiscoveryFramework create() {
+		Object o = null;
+		Class<?> clazz;
+		try {
+			clazz = this.getClass().getClassLoader().loadClass(ODF_DEFAULT_IMPLEMENTATION);
+		} catch (ClassNotFoundException e) {
+			throw new RuntimeException(MessageFormat.format("Class {0} was not found. Make sure that the odf-core jar and all its dependencies are available on the classpath.", ODF_DEFAULT_IMPLEMENTATION));
+		}
+		try {
+			o = clazz.newInstance();
+		} catch (InstantiationException | IllegalAccessException e) {
+			throw new RuntimeException(MessageFormat.format("Class {0} was found on the classpath but could not be accessed.", ODF_DEFAULT_IMPLEMENTATION));
+		}
+		if (o instanceof OpenDiscoveryFramework) {
+			return (OpenDiscoveryFramework) o;
+		} else {
+			throw new RuntimeException(MessageFormat.format("The class {0} on the classpath is not of type OpenDiscoveryFramework.", ODF_DEFAULT_IMPLEMENTATION));
+		}
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java
new file mode 100755
index 0000000..70ab91b
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/OpenDiscoveryFramework.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api;
+
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.engine.EngineManager;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+
+/**
+*
+* External Java API for managing and controlling ODF
+*
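+* <p>Illustrative usage (a sketch; assumes the odf-core implementation jar is on the classpath):</p>
+* <pre>
+* OpenDiscoveryFramework odf = new ODFFactory().create();
+* AnalysisManager analysisManager = odf.getAnalysisManager();
+* </pre>
+*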
+*/
+public interface OpenDiscoveryFramework {
+
+	/**
+	 * Returns API for managing ODF analysis requests
+	 *
+	 * @return ODF analysis manager API
+	 */
+	public AnalysisManager getAnalysisManager();
+
+	/**
+	 * Returns API for managing ODF discovery services
+	 *
+	 * @return ODF discovery services manager API
+	 */
+	public DiscoveryServiceManager getDiscoveryServiceManager();
+
+	/**
+	 * Returns API for controlling the ODF engine
+	 *
+	 * @return ODF engine manager API
+	 */
+	public EngineManager getEngineManager();
+
+	/**
+	 * Returns API for managing ODF settings
+	 *
+	 * @return ODF settings manager API
+	 */
+	public SettingsManager getSettingsManager();
+
+	/**
+	 * Returns ODF annotation store API
+	 *
+	 * @return ODF annotation store API
+	 */
+	public AnnotationStore getAnnotationStore();
+
+	/**
+	 * Returns ODF metadata store API
+	 *
+	 * @return ODF metadata store API
+	 */
+	public MetadataStore getMetadataStore();
+
+	/**
+	 * Returns JDBC importer utility for populating the metadata store with sample data
+	 *
+	 * @return ODF JDBC importer utility
+	 */
+	public JDBCMetadataImporter getJDBCMetadataImporter();
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java
new file mode 100755
index 0000000..cd294c5
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisCancelResult.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+public class AnalysisCancelResult {
+
+	public enum State {
+		NOT_FOUND,
+		INVALID_STATE,
+		SUCCESS
+	}
+
+	private State state;
+
+	public State getState() {
+		return state;
+	}
+
+	public void setState(State state) {
+		this.state = state;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java
new file mode 100755
index 0000000..6ff6098
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisManager.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+/**
+ *
+ * External interface for creating and managing analysis requests
+ *
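+ * <p>A minimal usage sketch (assumes {@code request} references an existing data set):</p>
+ * <pre>
+ * AnalysisManager am = new ODFFactory().create().getAnalysisManager();
+ * AnalysisResponse response = am.runAnalysis(request);
+ * AnalysisRequestStatus status = am.getAnalysisRequestStatus(response.getId());
+ * </pre>
+ *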
+ */
+public interface AnalysisManager {
+
+	/**
+	 * Issues a new ODF analysis request
+	 *
+	 * @param request Analysis request
+	 * @return Response containing the request id and status information
+	 */
+	public AnalysisResponse runAnalysis(AnalysisRequest request);
+
+	/**
+	 * Retrieve status of an ODF analysis request
+	 *
+	 * @param requestId Unique id of the analysis request
+	 * @return Status of the analysis request
+	 */
+	public AnalysisRequestStatus getAnalysisRequestStatus(String requestId);
+
+	/**
+	 * Retrieve statistics about all previous ODF analysis requests
+	 *
+	 * @return Request summary
+	 */
+	public AnalysisRequestSummary getAnalysisStats();
+
+	/**
+	 * Retrieve status details of recent ODF analysis requests
+	 *
+	 * @param offset Starting offset (use 0 to start with the latest request)
+	 * @param limit Maximum number of analysis requests to be returned (use -1 to retrieve all requests)
+	 * @return Status details for each discovery request
+	 */
+	public AnalysisRequestTrackers getAnalysisRequests(int offset, int limit);
+
+	/**
+	 * Request a specific ODF analysis request to be canceled
+	 *
+	 * @param requestId Unique id of the analysis request
+	 * @return Status of the cancellation attempt
+	 */
+	public AnalysisCancelResult cancelAnalysisRequest(String requestId);
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java
new file mode 100755
index 0000000..3aa5937
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequest.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
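+/**
+ * A minimal construction sketch (illustrative; {@code ref} stands for a
+ * MetaDataObjectReference to the data set to be analyzed, and the service id is hypothetical):
+ * <pre>{@code
+ * AnalysisRequest request = new AnalysisRequest();
+ * request.setDataSets(Collections.singletonList(ref));
+ * request.setDiscoveryServiceSequence(Arrays.asList("my-discovery-service"));
+ * }</pre>
+ */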
+@ApiModel(description="Request for starting a discovery service.")
+public class AnalysisRequest {
+
+	// only used when returned by the ODF
+	@ApiModelProperty(value="Unique request id (generated)", readOnly=true, required=false)
+	private String id;
+
+	@ApiModelProperty(value="Data set to be analyzed (currently limited to  a single data set)", required=true)
+	private List<MetaDataObjectReference> dataSets = new ArrayList<>();
+
+	@ApiModelProperty(value="Sequence of ids (or single id) of the discovery services to be issued", required=false)
+	private List<String> discoveryServiceSequence = new ArrayList<String>();
+
+	@ApiModelProperty(value="List annotation types to be created on the dataset(s)", required=false)
+	private List<String> annotationTypes = new ArrayList<String>();
+
+	@ApiModelProperty(value="Optional additional properties map to be passed to the discovery service(s)", required=false)
+	private Map<String, Object> additionalProperties = new HashMap<String, Object>();
+
+	@ApiModelProperty(value="Indicates that multiple data sets should be processed sequentially rather than in parallel", required=false)
+	private boolean processDataSetsSequentially = true;
+
+	// if false, the request will fail if some discovery service cannot process one of the data sets
+	@ApiModelProperty(value="Indicates that access to the data set should not be checked before starting the discovery service", required=false)
+	private boolean ignoreDataSetCheck = false;
+
+	public List<MetaDataObjectReference> getDataSets() {
+		return dataSets;
+	}
+
+	public void setDataSets(List<MetaDataObjectReference> dataSets) {
+		this.dataSets = dataSets;
+	}
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	public List<String> getDiscoveryServiceSequence() {
+		return discoveryServiceSequence;
+	}
+
+	public void setDiscoveryServiceSequence(List<String> discoveryServiceSequence) {
+		this.discoveryServiceSequence = discoveryServiceSequence;
+	}
+
+	public List<String> getAnnotationTypes() {
+		return annotationTypes;
+	}
+
+	public void setAnnotationTypes(List<String> annotationTypes) {
+		this.annotationTypes = annotationTypes;
+	}
+
+	public Map<String, Object> getAdditionalProperties() {
+		return additionalProperties;
+	}
+
+	public void setAdditionalProperties(Map<String, Object> additionalProperties) {
+		this.additionalProperties = additionalProperties;
+	}
+
+	public boolean isProcessDataSetsSequentially() {
+		return processDataSetsSequentially;
+	}
+
+	public void setProcessDataSetsSequentially(boolean processDataSetsSequentially) {
+		this.processDataSetsSequentially = processDataSetsSequentially;
+	}
+
+	public boolean isIgnoreDataSetCheck() {
+		return ignoreDataSetCheck;
+	}
+
+	public void setIgnoreDataSetCheck(boolean ignoreDataSetCheck) {
+		this.ignoreDataSetCheck = ignoreDataSetCheck;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java
new file mode 100755
index 0000000..b6a120e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestStatus.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+@ApiModel(description="Status of a specific analysis request.")
+public class AnalysisRequestStatus {
+
+	public static enum State {
+		ACTIVE, // some discovery service is processing the request 
+		QUEUED, // in the queue for some discovery service
+		ERROR, // something went wrong 
+		FINISHED, // processing finished successfully 
+		NOT_FOUND, // request ID was not found
+		CANCELLED // request was cancelled by the user
+	}
+
+	@ApiModelProperty(value="Analysis request that was submitted", readOnly=true, required=true)
+	private AnalysisRequest request;
+
+	@ApiModelProperty(value="Status of the request", readOnly=true, required=true)
+	private State state;
+
+	@ApiModelProperty(value="Detailed status description", readOnly=true, required=false)
+	private String details;
+
+	@ApiModelProperty(value="Indicates whether an equivalent request was found", readOnly=true, required=true)
+	private boolean foundExistingRequest = false;
+
+	@ApiModelProperty(value="List of individual discovery service requests that make up the analysis request", readOnly=true, required=true)
+	private List<DiscoveryServiceRequest> serviceRequests;
+
+	@ApiModelProperty(value="Total time the request was queued in milliseconds", readOnly=true, required=true)
+	private long totalTimeOnQueues;
+
+	@ApiModelProperty(value="Total time needed for processing the analysis request in milliseconds", readOnly=true, required=true)
+	private long totalTimeProcessing;
+
+	@ApiModelProperty(value="Total time needed for storing the annotations in the metadata repository in milliseconds", readOnly=true, required=true)
+	private long totalTimeStoringAnnotations;
+
+	public AnalysisRequest getRequest() {
+		return request;
+	}
+
+	public void setRequest(AnalysisRequest request) {
+		this.request = request;
+	}
+
+	public State getState() {
+		return state;
+	}
+
+	public void setState(State state) {
+		this.state = state;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+	public boolean isFoundExistingRequest() {
+		return foundExistingRequest;
+	}
+
+	public void setFoundExistingRequest(boolean foundExistingRequest) {
+		this.foundExistingRequest = foundExistingRequest;
+	}
+
+	public List<DiscoveryServiceRequest> getServiceRequests() {
+		return serviceRequests;
+	}
+
+	public void setServiceRequests(List<DiscoveryServiceRequest> requests) {
+		this.serviceRequests = requests;
+	}
+
+	public long getTotalTimeOnQueues() {
+		return totalTimeOnQueues;
+	}
+
+	public void setTotalTimeOnQueues(long totalTimeOnQueues) {
+		this.totalTimeOnQueues = totalTimeOnQueues;
+	}
+
+	public long getTotalTimeProcessing() {
+		return totalTimeProcessing;
+	}
+
+	public void setTotalTimeProcessing(long totalTimeProcessing) {
+		this.totalTimeProcessing = totalTimeProcessing;
+	}
+
+	public long getTotalTimeStoringAnnotations() {
+		return totalTimeStoringAnnotations;
+	}
+
+	public void setTotalTimeStoringAnnotations(long totalTimeStoringAnnotations) {
+		this.totalTimeStoringAnnotations = totalTimeStoringAnnotations;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java
new file mode 100755
index 0000000..a7982ef
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestSummary.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status summary of all analysis requests submitted since last start of ODF.")
+public class AnalysisRequestSummary {
+
+	@ApiModelProperty(value="Number of successful analysis requests", readOnly=true, required=true)
+	private int success;
+
+	@ApiModelProperty(value="Number of failing analysis requests", readOnly=true, required=true)
+	private int failure;
+
+	AnalysisRequestSummary() {
+	}
+
+	public AnalysisRequestSummary(int success, int failure) {
+		this.success = success;
+		this.failure = failure;
+	}
+	
+	public int getSuccess() {
+		return this.success;
+	}
+
+	public void setSuccess(int success) {
+		this.success = success;
+	}
+
+	public int getFailure() {
+		return this.failure;
+	}
+
+	public void setFailure(int failure) {
+		this.failure = failure;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java
new file mode 100755
index 0000000..49ca9f7
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackerStatus.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+public class AnalysisRequestTrackerStatus {
+	public static enum STATUS {
+		INITIALIZED, //tracker was created, nothing else happened so far
+		IN_DISCOVERY_SERVICE_QUEUE, //tracker is put on queue but not running yet
+		DISCOVERY_SERVICE_RUNNING, //only for async services, analysis is running
+		FINISHED, //analysis finished
+		ERROR, // an error occurred during analysis / processing
+		CANCELLED //the analysis was cancelled by the user
+	};
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java
new file mode 100755
index 0000000..846ed3d
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisRequestTrackers.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Container object tracking the status of multiple analysis requests.")
+public class AnalysisRequestTrackers {
+
+	@ApiModelProperty(value="List of container objects tracking the status of analysis requests", required=true)
+	private List<AnalysisRequestTracker> analysisRequestTrackers;
+
+	public List<AnalysisRequestTracker> getAnalysisRequestTrackers() {
+		return this.analysisRequestTrackers;
+	}
+
+	public void setAnalysisRequestTrackers(List<AnalysisRequestTracker> analysisRequestTrackers) {
+		this.analysisRequestTrackers = analysisRequestTrackers;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java
new file mode 100755
index 0000000..9f1e45c
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/analysis/AnalysisResponse.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.analysis;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+@ApiModel(description="Response returned by the analysis request.")
+public class AnalysisResponse {
+	@ApiModelProperty(value="Unique request id", readOnly=true, required=true)
+	private String id;
+
+	@ApiModelProperty(value="Original request that is equivalent to the submitted one which is therefore skipped", readOnly=true, required=true)
+	private AnalysisRequest originalRequest;
+
+	private boolean isInvalidRequest = false;
+
+	@ApiModelProperty(value="Details about why the request is invalid", readOnly=true, required=false)
+	private String details;
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	public AnalysisRequest getOriginalRequest() {
+		return originalRequest;
+	}
+
+	public void setOriginalRequest(AnalysisRequest originalRequest) {
+		this.originalRequest = originalRequest;
+	}
+
+	@ApiModelProperty(name="isInvalidRequest", value="Indicates whether the submitted request is invalid", readOnly=true, required=true)
+	public boolean isInvalidRequest() {
+		return isInvalidRequest;
+	}
+
+	public void setInvalidRequest(boolean isInvalidRequest) {
+		this.isInvalidRequest = isInvalidRequest;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java
new file mode 100755
index 0000000..7e08d74
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStore.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.annotation;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.ExternalStore;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+public interface AnnotationStore extends ExternalStore {
+
+	/**
+	 * Store a single annotation in the annotation store.
+	 *
+	 * @param annotation Annotation to be stored
+	 * @return the reference to the object that was created or updated
+	 */
+	MetaDataObjectReference store(Annotation annotation);
+	
+	/**
+	 * Get all annotations attached to the given metadata object for a specific analysis request.
+	 * Passing null as the analysis request id returns the annotations of all requests.
+	 */
+	List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId);
+	
+	/**
+	 * Retrieve an annotation by ID
+	 */
+	Annotation retrieveAnnotation(MetaDataObjectReference ref);
+	
+	// internal
+	void setAnalysisRun(String analysisRun);
+
+	String getAnalysisRun();
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java
new file mode 100755
index 0000000..e51faf6
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/AnnotationStoreUtils.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.annotation;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+
+public class AnnotationStoreUtils {
+
+	/**
+	 * Return the most recent annotations for the passed object but at most one per annotation type.
+	 * Note that this might not be suitable for the semantics represented by some annotation types. 
+	 */
+	public static List<Annotation> getMostRecentAnnotationsByType(AnnotationStore as, MetaDataObjectReference object) {
+		try {
+			// Fix issue 99: only return one annotation per type
+			Map<String, Annotation> mostRecentAnnotationsByType = new HashMap<>();
+			Map<String, Long> typeToMaxTimestamp = new HashMap<>();
+			for (Annotation annot : as.getAnnotations(object, null)) {
+				Long ts = getTimestamp(annot);
+				String annotType = annot.getAnnotationType();
+				Long l = typeToMaxTimestamp.get(annotType);
+				if (l == null) {
+					l = ts;
+				}
+				if (l <= ts) {
+					typeToMaxTimestamp.put(annotType, Long.valueOf(ts));
+					mostRecentAnnotationsByType.put(annotType, annot);
+				}
+			}
+			return new ArrayList<>(mostRecentAnnotationsByType.values());
+		} catch (Exception exc) {
+			throw new MetadataStoreException(exc);
+		}
+	}
+	
+	private static long getTimestamp(Annotation annot) {
+		final long defaultVal = -1;
+		// The analysis run id is expected to end with "_<timestampInMillis>".
+		String runId = annot.getAnalysisRun();
+		int ix = runId.lastIndexOf("_");
+		if (ix == -1) {
+			return defaultVal;
+		}
+		String millis = runId.substring(ix + 1); // skip the underscore itself
+		long result = defaultVal;
+		try {
+			result = Long.valueOf(millis);
+		} catch (NumberFormatException e) {
+			return defaultVal;
+		}
+		return result;
+	}
+
+	/**
+	 * Retrieve the annotations of a metadata object that have a given annotation type
+	 * and were created by a specific analysis request.
+	 *
+	 * @param mdo Metadata object whose annotations are to be retrieved
+	 * @param store Annotation store to search
+	 * @param annotationType Annotation type to filter by
+	 * @param requestId Id of the analysis request that created the annotations
+	 * @return List of annotations of this type on 'mdo' created by the request with the id 'requestId'
+	 */
+	public static List<Annotation> retrieveAnnotationsOfRun(MetaDataObject mdo, AnnotationStore store, String annotationType, String requestId) {
+		Logger logger = Logger.getLogger(AnnotationStoreUtils.class.getName());
+		List<Annotation> annotations = new ArrayList<>();
+		for (Annotation annot : store.getAnnotations(mdo.getReference(), null)) {
+			logger.log(Level.FINER, "Found annotation on object {0} with analysis run {1} and annotationType {2}",
+					new Object[] { mdo.getReference().getId(), annot.getAnalysisRun(), annot.getAnnotationType() });
+			if (annot.getAnalysisRun().equals(requestId) && annot.getAnnotationType().equals(annotationType)) {
+				annotations.add(annot);
+			}
+		}
+		return annotations;
+	}
+
+	/**
+	 * For a given annotation, return the reference to the annotated object.
+	 * Throws a MetadataStoreException if this reference is null.
+	 *
+	 * @param annot Annotation whose annotated object is to be determined
+	 * @return Reference to the object annotated by 'annot'
+	 */
+	public static MetaDataObjectReference getAnnotatedObject(Annotation annot) {
+		MetaDataObjectReference annotRef = null;
+		if (annot instanceof ProfilingAnnotation) {
+			annotRef = ((ProfilingAnnotation) annot).getProfiledObject();
+		} else if (annot instanceof ClassificationAnnotation) {
+			annotRef = ((ClassificationAnnotation) annot).getClassifiedObject();
+		} else if (annot instanceof RelationshipAnnotation) {
+			List<MetaDataObjectReference> refs = ((RelationshipAnnotation) annot).getRelatedObjects();
+			if (refs != null && refs.size() > 0) {
+				annotRef = refs.get(0);
+			}
+		}
+		if (annotRef == null) {
+			String errorMessage = MessageFormat.format("The annotated object of annotation with ID ''{0}'' is null.", annot.getReference().getId());
+			throw new MetadataStoreException(errorMessage);
+		}
+		return annotRef;
+	}
+	
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java
new file mode 100755
index 0000000..058b472
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/annotation/Annotations.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.annotation;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+public class Annotations {
+	private List<Annotation> annotations;
+
+	public List<Annotation> getAnnotations() {
+		return annotations;
+	}
+
+	public void setAnnotations(List<Annotation> annotations) {
+		this.annotations = annotations;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java
new file mode 100755
index 0000000..c67ae97
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataRetrievalException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.connectivity;
+
+public class DataRetrievalException extends RuntimeException {
+
+	private static final long serialVersionUID = 4978058839277657L;
+
+	public DataRetrievalException() {
+		super();
+	}
+
+	public DataRetrievalException(String message) {
+		super(message);
+	}
+
+	public DataRetrievalException(Throwable cause) {
+		super(cause);
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java
new file mode 100755
index 0000000..8dc2579
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetriever.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.connectivity;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
+
+public interface DataSetRetriever {
+
+	void setMetadataStore(MetadataStore mds);
+
+	boolean canRetrieveDataSet(DataSet oMDataSet);
+	
+	MaterializedDataSet retrieveRelationalDataSet(RelationalDataSet relationalDataSet) throws DataRetrievalException;
+	
+	void createCsvFile(RelationalDataSet relationalDataSet, String fileName) throws DataRetrievalException;
+
+	JDBCRetrievalResult retrieveTableAsJDBCResultSet(Table oMTable) throws DataRetrievalException;
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java
new file mode 100755
index 0000000..6846ba5
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/DataSetRetrieverImpl.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.connectivity;
+
+import java.io.File;
+import java.io.PrintWriter;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.commons.csv.CSVFormat;
+import org.apache.commons.csv.CSVParser;
+import org.apache.commons.csv.CSVRecord;
+
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
+
+/**
+ * This class is a helper to retrieve actual data from a data source by passing an object that represents a reference to the dataset.
+ *
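+ * <p>A minimal usage sketch (assumes {@code mds} is an initialized MetadataStore and
+ * {@code table} a RelationalDataSet obtained from it):</p>
+ * <pre>{@code
+ * DataSetRetriever retriever = new DataSetRetrieverImpl(mds);
+ * if (retriever.canRetrieveDataSet(table)) {
+ *     MaterializedDataSet data = retriever.retrieveRelationalDataSet(table);
+ * }
+ * }</pre>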
+ */
+public class DataSetRetrieverImpl implements DataSetRetriever {
+
+	Logger logger = Logger.getLogger(DataSetRetrieverImpl.class.getName());
+	MetadataStore metaDataStore;
+
+	public DataSetRetrieverImpl() {
+	}
+
+	public DataSetRetrieverImpl(MetadataStore metaDataStore) {
+		this.metaDataStore = metaDataStore;
+	}
+
+	@Override
+	public void setMetadataStore(MetadataStore mds) {
+		this.metaDataStore = mds;
+	}
+
+	@Override
+	public boolean canRetrieveDataSet(DataSet dataSet) {
+		if (dataSet instanceof DataFile) {
+			DataFile dataFile = (DataFile) dataSet;
+			return getValidURL(dataFile) != null;
+		} else if (dataSet instanceof Table) {
+			Connection connection = getJDBCConnection((JDBCConnectionInfo) metaDataStore.getConnectionInfo(dataSet));
+			if (connection != null) {
+				try {
+					connection.close();
+					return true;
+				} catch (SQLException e) {
+					// do nothing
+				}
+			}
+		}
+		return false;
+	}
+
+	@Override
+	public MaterializedDataSet retrieveRelationalDataSet(RelationalDataSet relationalDataSet) {
+		if (relationalDataSet instanceof DataFile) {
+			return retrieveDataFile((DataFile) relationalDataSet);
+		} else if (relationalDataSet instanceof Table) {
+			return retrieveTableWithJDBC((Table) relationalDataSet);
+		}
+		return null;
+	}
+
+	@Override
+	public void createCsvFile(RelationalDataSet relationalDataSet, String fileName) {
+		try {
+			logger.log(Level.INFO, "Creating CSV input data file {0}", fileName);
+			MaterializedDataSet mds = retrieveRelationalDataSet(relationalDataSet);
+			PrintWriter printWriter = new PrintWriter(new File(fileName), "UTF-8");
+			int columnCount = mds.getColumns().size();
+			String headers = "\"" + mds.getColumns().get(0).getName() + "\"";
+			for (int i = 1; i < columnCount; i++) {
+				headers += ",\"" + mds.getColumns().get(i).getName() + "\"";
+			}
+			printWriter.println(headers);
+			for (int i = 0; i < mds.getData().size(); i++) {
+				String row = "\"" + mds.getData().get(i).get(0).toString() + "\"";
+				for (int j = 1; j < columnCount; j++) {
+					row += ",\"" + mds.getData().get(i).get(j).toString() + "\"";
+				}
+				printWriter.println(row);
+			}
+			printWriter.close();
+		} catch (Exception exc) {
+			throw new DataRetrievalException(exc);
+		}
+	}
+
+	private URL getValidURL(DataFile dataFile) {
+		try {
+			Charset.forName(dataFile.getEncoding());
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, MessageFormat.format("Encoding ''{0}'' of data file ''{1}''is not valid''", new Object[] { dataFile.getEncoding(), dataFile.getUrlString() }), exc);
+			return null;
+		}
+		String urlString = dataFile.getUrlString();
+		try {
+			URL url = new URL(urlString);
+			url.openConnection().connect();
+			return url;
+		} catch (Exception exc) {
+			String msg = MessageFormat.format("Could not connect to data file URL ''{0}''. Error: {1}", new Object[] { urlString, exc.getMessage() });
+			logger.log(Level.WARNING, msg, exc);
+			return null;
+		}
+	}
+
+	private MaterializedDataSet retrieveDataFile(DataFile dataFile) throws DataRetrievalException {
+		URL url = this.getValidURL(dataFile);
+		if (url == null) {
+			return null;
+		}
+		List<Column> columns = metaDataStore.getColumns(dataFile);
+		List<List<Object>> data = new ArrayList<>();
+
+		try {
+			CSVParser csvParser = CSVParser.parse(url, Charset.forName(dataFile.getEncoding()), CSVFormat.DEFAULT.withHeader());
+			List<CSVRecord> records = csvParser.getRecords();
+			Map<String, Integer> headerMap = csvParser.getHeaderMap();
+			csvParser.close();
+
+			for (CSVRecord record : records) {
+				List<Object> targetRecord = new ArrayList<>();
+				for (int i = 0; i < columns.size(); i++) {
+					Column col = columns.get(i);
+					String value = record.get(headerMap.get(col.getName()));
+					Object convertedValue = value;
+					if (col.getDataType().equals("int")) {
+						convertedValue = Integer.parseInt(value);
+					} else if (col.getDataType().equals("double")) {
+						convertedValue = Double.parseDouble(value);
+					}
+					// TODO add more conversions
+					targetRecord.add(convertedValue);
+				}
+				data.add(targetRecord);
+			}
+
+		} catch (Exception exc) {
+			throw new DataRetrievalException(exc);
+		}
+
+		MaterializedDataSet materializedDS = new MaterializedDataSet();
+		materializedDS.setTable(dataFile);
+		materializedDS.setColumns(columns);
+		materializedDS.setData(data);
+		return materializedDS;
+	}
+
+	public static String quoteForJDBC(String s) {
+		// TODO implement to prevent SQL injection
+		return s;
+	}
+
+	Connection getJDBCConnection(JDBCConnectionInfo connectionInfo) {
+		if ((connectionInfo.getConnections() == null) || connectionInfo.getConnections().isEmpty()) {
+			return null;
+		}
+		JDBCConnection connectionObject = (JDBCConnection) connectionInfo.getConnections().get(0); // Use first connection
+		try {
+			return DriverManager.getConnection(connectionObject.getJdbcConnectionString(), connectionObject.getUser(), connectionObject.getPassword());
+		} catch (SQLException exc) {
+			logger.log(Level.WARNING, MessageFormat.format("JDBC connection to ''{0}'' for table ''{1}'' could not be created", new Object[] { connectionObject.getJdbcConnectionString(),
+					connectionInfo.getSchemaName() + "." + connectionInfo.getTableName() }), exc);
+			return null;
+		}
+	}
+
+	private MaterializedDataSet retrieveTableWithJDBC(Table table) {
+
+		JDBCRetrievalResult jdbcRetrievalResult = this.retrieveTableAsJDBCResultSet(table);
+		if (jdbcRetrievalResult == null) {
+			logger.log(Level.FINE, "JDBC retrieval result for table ''{0}'' is null", table.getReference().getUrl());
+			return null;
+		}
+
+		Map<String, Column> columnMap = new HashMap<String, Column>();
+		for (Column column : metaDataStore.getColumns(table)) {
+			columnMap.put(column.getName(), column);
+		}
+		logger.log(Level.INFO, "Table columns: {0}", columnMap.keySet());
+
+		ResultSet rs = null;
+		try {
+			logger.log(Level.FINE, "Executing prepared statement " + jdbcRetrievalResult.getPreparedStatement());
+			rs = jdbcRetrievalResult.getPreparedStatement().executeQuery();
+			ResultSetMetaData rsmd = rs.getMetaData();
+			List<Column> resultSetColumns = new ArrayList<>();
+			int columnCount = rsmd.getColumnCount();
+			for (int i = 1; i <= columnCount; i++) {
+				Column col = new Column();
+				col.setName(rsmd.getColumnName(i));
+				col.setDataType(rsmd.getColumnTypeName(i));
+				Column retrievedColumn = columnMap.get(col.getName());
+				if (retrievedColumn != null && retrievedColumn.getReference() != null) {
+					col.setReference(retrievedColumn.getReference());
+				} else {
+					logger.log(Level.WARNING, "Error setting reference on column, this can cause issues when annotations are created on the column!");
+				}
+				resultSetColumns.add(col);
+			}
+
+			List<List<Object>> data = new ArrayList<>();
+			while (rs.next()) {
+				List<Object> row = new ArrayList<>();
+				for (int i = 1; i <= columnCount; i++) {
+					row.add(rs.getObject(i));
+				}
+				data.add(row);
+			}
+
+			MaterializedDataSet result = new MaterializedDataSet();
+			result.setTable(table);
+			result.setColumns(resultSetColumns);
+			result.setData(data);
+			return result;
+		} catch (SQLException exc) {
+			throw new DataRetrievalException(exc);
+		} finally {
+			try {
+				if (rs != null) {
+					rs.close();
+				}
+				jdbcRetrievalResult.close();
+			} catch (SQLException exc) {
+				throw new DataRetrievalException(exc);
+			}
+		}
+
+	}
+
+	@Override
+	public JDBCRetrievalResult retrieveTableAsJDBCResultSet(Table table) {
+		JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) this.metaDataStore.getConnectionInfo(table);
+		Connection connection = null;
+		PreparedStatement stat = null;
+		try {
+			connection = this.getJDBCConnection(connectionInfo);
+			if (connection == null) {
+				logger.log(Level.FINE, "No jdbc connection found for table ''{0}'' (''{1}'')", new Object[]{table.getName(), table.getReference().getUrl()});
+				return null;
+			}
+			String schemaName = connectionInfo.getSchemaName();
+			String sql = "select * from " + quoteForJDBC(schemaName) + "." + quoteForJDBC(table.getName());
+			logger.log(Level.FINER, "Running JDBC statement: ''{0}''", sql);
+			stat = connection.prepareStatement(sql);
+			return new JDBCRetrievalResult(connection, stat);
+		} catch (SQLException exc) {
+			String msg = MessageFormat.format("An SQL exception occurred when preparing data access for table ''{0}'' ({1})", new Object[]{table.getName(), table.getReference().getUrl()});
+			logger.log(Level.WARNING, msg, exc);
+			try {
+				if (connection != null) {
+					connection.close();
+				}
+			} catch (SQLException exc2) {
+				// do nothing
+				logger.log(Level.WARNING, msg, exc2);
+				throw new DataRetrievalException(exc2);
+			}
+			throw new DataRetrievalException(exc);
+		}
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java
new file mode 100755
index 0000000..b418eb0
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/JDBCRetrievalResult.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.connectivity;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
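+/**
+ * Holds the JDBC connection together with the prepared statement used to retrieve a table.
+ * Callers are responsible for invoking close() once the result set has been consumed,
+ * as done in DataSetRetrieverImpl.retrieveTableWithJDBC().
+ */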
+public class JDBCRetrievalResult {
+
+	private Connection connection;
+	private PreparedStatement preparedStatement;
+
+	public JDBCRetrievalResult(Connection connection, PreparedStatement preparedStatement) {
+		super();
+		this.connection = connection;
+		this.preparedStatement = preparedStatement;
+	}
+
+	public Connection getConnection() {
+		return connection;
+	}
+
+	public PreparedStatement getPreparedStatement() {
+		return preparedStatement;
+	}
+
+	public void close() throws SQLException {
+		if (preparedStatement != null) {
+			preparedStatement.close();
+		}
+		if (connection != null) {
+			connection.close();
+		}
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java
new file mode 100755
index 0000000..7946084
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/connectivity/RESTClientManager.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.connectivity;
+
+import java.net.URI;
+import java.security.GeneralSecurityException;
+import java.security.cert.X509Certificate;
+import java.util.logging.Logger;
+
+import javax.net.ssl.SSLContext;
+
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContextBuilder;
+import org.apache.http.ssl.TrustStrategy;
+
+/**
+ * 
+ * This is a helper class to authenticate HTTP requests.
+ *
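+ * <p>Illustrative usage (host, port, path, and credentials below are placeholders):</p>
+ * <pre>{@code
+ * RESTClientManager client = new RESTClientManager(URI.create("https://localhost:58081"), "user", "password");
+ * Executor executor = client.getAuthenticatedExecutor();
+ * String result = executor.execute(Request.Get("https://localhost:58081/odf/api/v1/engine/status"))
+ *                         .returnContent().asString();
+ * }</pre>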
+ */
+public class RESTClientManager {
+
+	Logger logger = Logger.getLogger(RESTClientManager.class.getName());
+
+	private Executor executor = null;
+
+	private URI baseUrl;
+	private String user;
+	private String password;
+
+	public RESTClientManager(URI baseUrl, String user, String password) {
+		this.baseUrl = baseUrl;
+		this.user = user;
+		this.password = password;
+	}
+
+	public RESTClientManager(URI baseUrl) {
+		this(baseUrl, null, null);
+	}
+
+	public Executor getAuthenticatedExecutor() throws GeneralSecurityException {
+		if (executor != null) {
+			return executor;
+		}
+		// TODO always accept the certificate for now but do proper certificate stuff in the future 
+		TrustStrategy acceptAllTrustStrategy = new TrustStrategy() {
+			@Override
+			public boolean isTrusted(X509Certificate[] certificate, String authType) {
+				return true;
+			}
+		};
+		SSLContextBuilder contextBuilder = new SSLContextBuilder();
+		SSLContext context = contextBuilder.loadTrustMaterial(null, acceptAllTrustStrategy).build();
+		SSLConnectionSocketFactory scsf = new SSLConnectionSocketFactory(context, new NoopHostnameVerifier());
+
+		HttpClient httpClient = HttpClientBuilder.create() //
+				.setSSLSocketFactory(scsf) //
+				.build();
+
+		if (this.user != null) {
+			if (this.baseUrl == null) {
+				executor = Executor.newInstance(httpClient).auth(new UsernamePasswordCredentials(this.user, this.password));
+			} else {
+				executor = Executor.newInstance(httpClient).auth(this.baseUrl.getHost(), new UsernamePasswordCredentials(this.user, this.password));
+			}
+		} else {
+			executor = Executor.newInstance(httpClient);
+		}
+		return executor;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java
new file mode 100755
index 0000000..23ef661
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/AnalysisRequestTracker.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus;
+
+// JSON
+@ApiModel(description="Container for tracking the status of an analysis request.")
+public class AnalysisRequestTracker {
+
+	@ApiModelProperty(value="Analysis request", required=true)
+	private AnalysisRequest request;
+
+	@ApiModelProperty(value="List of discovery service requests that make up the analysis request", required=true)
+	private List<DiscoveryServiceRequest> discoveryServiceRequests = new ArrayList<DiscoveryServiceRequest>();
+
+	@ApiModelProperty(value="List of responses, one for each discovery service request", required=true)
+	private List<DiscoveryServiceResponse> discoveryServiceResponses = new ArrayList<DiscoveryServiceResponse>();
+
+	@ApiModelProperty(value="Status of the analysis request", required=true)
+	private AnalysisRequestTrackerStatus.STATUS status = AnalysisRequestTrackerStatus.STATUS.INITIALIZED;
+
+	@ApiModelProperty(value="Detailed status of the analysis request", required=false)
+	private String statusDetails;
+
+	@ApiModelProperty(value="Timestamp of last status update", required=true)
+	private long lastModified;
+
+	@ApiModelProperty(value="User who has submitted the analysis request", required=true)
+	private String user;
+
+	// A tracker object is used to publish changes across all ODF nodes. When writing a tracker on the queue,
+	// a revision is added so that we know when a tracker has successfully been stored in the ODF that wrote it.
+	// This is necessary to make storing these trackers a synchronous operation.
+	@ApiModelProperty(value="Internal revision id of the analysis request", required=true)
+	private String revisionId;
+
+	@ApiModelProperty(value="Next discovery service request to be issued")
+	private int nextDiscoveryServiceRequest = 0;
+
+	public String getUser() {
+		return user;
+	}
+
+	public void setUser(String user) {
+		this.user = user;
+	}
+
+	public long getLastModified() {
+		return lastModified;
+	}
+
+	public void setLastModified(long lastModified) {
+		this.lastModified = lastModified;
+	}
+
+	public List<DiscoveryServiceRequest> getDiscoveryServiceRequests() {
+		return discoveryServiceRequests;
+	}
+
+	public void setDiscoveryServiceRequests(List<DiscoveryServiceRequest> discoveryServiceRequests) {
+		this.discoveryServiceRequests = discoveryServiceRequests;
+	}
+
+	public int getNextDiscoveryServiceRequest() {
+		return nextDiscoveryServiceRequest;
+	}
+
+	public void setNextDiscoveryServiceRequest(int nextDiscoveryServiceRequest) {
+		this.nextDiscoveryServiceRequest = nextDiscoveryServiceRequest;
+	}
+
+	public AnalysisRequest getRequest() {
+		return request;
+	}
+
+	public void setRequest(AnalysisRequest request) {
+		this.request = request;
+	}
+
+	public AnalysisRequestTrackerStatus.STATUS getStatus() {
+		return status;
+	}
+
+	public void setStatus(AnalysisRequestTrackerStatus.STATUS status) {
+		this.status = status;
+	}
+
+	public String getStatusDetails() {
+		return statusDetails;
+	}
+
+	public void setStatusDetails(String statusDetails) {
+		this.statusDetails = statusDetails;
+	}
+
+	public List<DiscoveryServiceResponse> getDiscoveryServiceResponses() {
+		return discoveryServiceResponses;
+	}
+
+	public void setDiscoveryServiceResponses(List<DiscoveryServiceResponse> discoveryServiceResponses) {
+		this.discoveryServiceResponses = discoveryServiceResponses;
+	}
+
+	public String getRevisionId() {
+		return revisionId;
+	}
+
+	public void setRevisionId(String revisionId) {
+		this.revisionId = revisionId;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java
new file mode 100755
index 0000000..3e46e83
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DataSetCheckResult.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * 
+ * An object of this class must be returned by a service's checkDataSet() method.
+ *
+ */
+@ApiModel(description="Result returned by REST-based discovery services that indicates whether a dataset can be processed by the service.")
+public class DataSetCheckResult {
+
+	public static enum DataAccess {
+		NotPossible,
+		Possible
+	};
+
+	@ApiModelProperty(value="Indicates whether a dataset can be accessed by a discovery service, i.e. whether access is possible or not", readOnly=true, required=true)
+	private DataAccess dataAccess = DataAccess.Possible;
+
+	@ApiModelProperty(value="Message explaining why access to the dataset is not possible", readOnly=true)
+	private String details;
+
+	public DataAccess getDataAccess() {
+		return dataAccess;
+	}
+
+	public void setDataAccess(DataAccess dataAccess) {
+		this.dataAccess = dataAccess;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java
new file mode 100755
index 0000000..99366e7
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryService.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.concurrent.ExecutorService;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+
+/**
+ * Every kind of discovery service must implement this interface.
+ * For Java services, the executor service can be used to start and manage threads with the credentials of the current ODF user.
+ * The metadata store can be used to access metadata required by the service.
+ *
+ */
+public interface DiscoveryService {
+
+	void setExecutorService(ExecutorService executorService);
+	
+	void setMetadataStore(MetadataStore metadataStore);
+	void setAnnotationStore(AnnotationStore annotationStore);
+
+	/**
+	 * Checks whether a data set can be processed by the discovery service.
+	 *
+	 * @param dataSetContainer Data set container that contains a reference to the data set to be accessed
+	 * @return Status information whether access to the data set is possible or not
+	 */
+	DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer);
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java
new file mode 100755
index 0000000..db73966
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceBase.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.concurrent.ExecutorService;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+
+/**
+ * A discovery service base class that services can subclass for convenience.
+ * 
+ *
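+ * <p>Illustrative subclass (a sketch only; it assumes DataSetContainer exposes its data set
+ * via getDataSet(), and a real service would additionally implement one of the synchronous
+ * or asynchronous discovery service interfaces):</p>
+ * <pre>{@code
+ * public class MyDiscoveryService extends DiscoveryServiceBase {
+ *     @Override
+ *     public DataSetCheckResult checkDataSet(DataSetContainer container) {
+ *         DataSetCheckResult result = new DataSetCheckResult();
+ *         if (!(container.getDataSet() instanceof RelationalDataSet)) {
+ *             result.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
+ *             result.setDetails("Only relational data sets are supported.");
+ *         }
+ *         return result;
+ *     }
+ * }
+ * }</pre>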
+ */
+public abstract class DiscoveryServiceBase implements DiscoveryService {
+	protected ExecutorService executorService;
+	protected MetadataStore metadataStore;
+	protected AnnotationStore annotationStore;
+
+	@Override
+	public void setExecutorService(ExecutorService executorService) {
+		this.executorService = executorService;
+	}
+
+	@Override
+	public void setMetadataStore(MetadataStore metadataStore) {
+		this.metadataStore = metadataStore;
+	}
+
+	@Override
+	public void setAnnotationStore(AnnotationStore annotationStore) {
+		this.annotationStore = annotationStore;
+	}
+
+	@Override
+	public DataSetCheckResult checkDataSet(DataSetContainer dataSet) {
+		DataSetCheckResult result = new DataSetCheckResult();
+		result.setDataAccess(DataSetCheckResult.DataAccess.Possible);
+		return result;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java
new file mode 100755
index 0000000..d3f63e8
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceEndpoint.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+
+import io.swagger.annotations.ApiModel;
+
+//JSON
+@ApiModel(description="Endpoint of the discovery service.")
+public class DiscoveryServiceEndpoint {
+	private String runtimeName;
+	private Map<String, Object> props = new HashMap<>();
+
+	public String getRuntimeName() {
+		return runtimeName;
+	}
+
+	public void setRuntimeName(String runtimeName) {
+		this.runtimeName = runtimeName;
+	}
+	
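+	// All endpoint properties other than the runtime name are kept in a generic map via
+	// @JsonAnyGetter/@JsonAnySetter so that runtime-specific endpoints (e.g. the
+	// DiscoveryServiceJavaEndpoint class) can be converted from/to this class as JSON.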
+	@JsonAnyGetter
+	public Map<String, Object> get() {
+		return props;
+	}
+
+	@JsonAnySetter
+	public void set(String name, Object value) {
+		props.put(name, value);
+	}
+	
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java
new file mode 100755
index 0000000..8e79511
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceJavaEndpoint.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+/**
+ * This class represents a Java ODF discovery service endpoint.
+ * Note: It does not inherit from DiscoveryServiceEndpoint; to convert between the two classes, use JSONUtils.convert().
+ *
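+ * A JSON representation of this endpoint looks like this (the class name is
+ * illustrative):
+ * <pre>
+ * { "runtimeName": "Java", "className": "org.example.MyDiscoveryService" }
+ * </pre>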
+ */
+public class DiscoveryServiceJavaEndpoint {
+
+	private String runtimeName;
+	/*
+	 * The class name identifies a class that must be available on the classpath and must implement the ODF discovery service interface
+	 */
+	private String className;
+
+	public DiscoveryServiceJavaEndpoint() {
+		this.setRuntimeName("Java");
+	}
+	
+	public String getClassName() {
+		return className;
+	}
+
+	public void setClassName(String className) {
+		this.className = className;
+	}
+
+	public String getRuntimeName() {
+		return runtimeName;
+	}
+
+	public void setRuntimeName(String runtimeName) {
+		this.runtimeName = runtimeName;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java
new file mode 100755
index 0000000..00e28e2
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceManager.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.io.InputStream;
+import java.util.List;
+
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+/**
+ *
+ * External Java API for creating and managing discovery services
+ *
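+ * A registration sketch (the property values and the way the manager instance
+ * is obtained are assumptions, not defined by this interface):
+ * <pre>
+ * DiscoveryServiceProperties props = new DiscoveryServiceProperties();
+ * props.setId("my-sample-service");
+ * props.setName("My sample discovery service");
+ * props.setEndpoint(endpoint); // a previously built DiscoveryServiceEndpoint
+ * discoveryServiceManager.createDiscoveryService(props);
+ * </pre>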
+ */
+public interface DiscoveryServiceManager {
+
+	/**
+	 * Retrieve list of discovery services registered in ODF
+	 * @return List of registered ODF discovery services
+	 */
+	public List<DiscoveryServiceProperties> getDiscoveryServicesProperties();
+
+	/**
+	 * Register a new service in ODF
+	 * @param dsProperties Properties of the discovery service to register
+	 * @throws ValidationException Validation of a property failed
+	 */
+	public void createDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException;
+
+	/**
+	 * Update the configuration of an ODF discovery service
+	 * @param dsProperties Properties of the discovery service to update
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 * @throws ValidationException Validation of a property failed
+	 */
+	public void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ServiceNotFoundException, ValidationException;
+
+	/**
+	 * Remove a registered service from ODF
+	 * @param serviceId Discovery service ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 * @throws ValidationException Validation of a property failed
+	 */
+	public void deleteDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException;
+
+	/**
+	 * Retrieve the current configuration of a discovery service registered in ODF
+	 * @param serviceId Discovery Service ID
+	 * @return Properties of the service with this ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public DiscoveryServiceProperties getDiscoveryServiceProperties(String serviceId) throws ServiceNotFoundException;
+
+	/**
+	 * Retrieve status overview of all discovery services registered in ODF
+	 * @return List of status count maps for all discovery services
+	 */
+	public List<ServiceStatusCount> getDiscoveryServiceStatusOverview();
+
+	/**
+	 * Retrieve status of a specific discovery service. Returns null if no service info can be obtained
+	 * @param serviceId Discovery Service ID
+	 * @return Status of the service with this ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public DiscoveryServiceStatus getDiscoveryServiceStatus(String serviceId) throws ServiceNotFoundException;
+
+	/**
+	 * Retrieve runtime statistics of a specific discovery service
+	 * @param serviceId Discovery Service ID
+	 * @return Runtime statistics of the service with this ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public DiscoveryServiceRuntimeStatistics getDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException;
+
+	/**
+	 * Delete runtime statistics of a specific discovery service
+	 * @param serviceId Discovery Service ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public void deleteDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException;
+
+	/**
+	 * Retrieve picture representing a discovery service
+	 * @param serviceId Discovery Service ID
+	 * @return Input stream for image
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public InputStream getDiscoveryServiceImage(String serviceId) throws ServiceNotFoundException;
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java
new file mode 100755
index 0000000..78989ec
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceProperties.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * This class is used for the registration of a service at an ODF instance.
+ * A remote discovery service implementation must provide a JSON document of
+ * this type in order to register itself with an ODF instance.
+ *
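+ * An example registration document (all values are illustrative):
+ * <pre>
+ * {
+ *   "id": "my-sample-service",
+ *   "name": "My sample discovery service",
+ *   "endpoint": { "runtimeName": "Java", "className": "org.example.MyDiscoveryService" }
+ * }
+ * </pre>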
+ */
+@ApiModel(description="Parameters describing a discovery service.")
+public class DiscoveryServiceProperties {
+	@ApiModelProperty(value="Unique id string of the discovery service", required=true)
+	private String id;
+
+	@ApiModelProperty(value="Descriptive name of the discovery service", required=true)
+	private String name;
+
+	@ApiModelProperty(value="Optional description of the discovery service")
+	private String description;
+
+	@ApiModelProperty(value="Optional custom description of the discovery service")
+	private String customDescription;
+
+	@ApiModelProperty(value="Optional link to a JPG or PNG image illustrating the discovery service")
+	private String iconUrl;
+
+	@ApiModelProperty(value="Optional URL pointing to the description of the discovery service")
+	private String link;
+
+	@ApiModelProperty(value="List of prerequisite annotation types required to run the discovery service")
+	private List<String> prerequisiteAnnotationTypes;
+
+	@ApiModelProperty(value="List of annotation types created by the discovery service")
+	private List<String> resultingAnnotationTypes;
+
+	@ApiModelProperty(value="Types of objects that can be analyzed by the discovery service")
+	private List<String> supportedObjectTypes;
+
+	@ApiModelProperty(value="Types of objects that may be assigned to the resulting annotations")
+	private List<String> assignedObjectTypes;
+
+	@ApiModelProperty(value="Ids of specific objects (e.g. data classes) that may be assigned to resulting annotations")
+	private List<String> assignedObjectCandidates;
+
+	@ApiModelProperty(value = "Number of parallel analyses the service can handle, with a default of 2")
+	private Integer parallelismCount = 2;
+
+	@ApiModelProperty(value="Endpoint of the discovery service", required=true)
+	private DiscoveryServiceEndpoint endpoint;
+
+	public String getCustomDescription() {
+		return customDescription;
+	}
+
+	public void setCustomDescription(String customDescription) {
+		this.customDescription = customDescription;
+	}
+
+	public List<String> getPrerequisiteAnnotationTypes() {
+		return prerequisiteAnnotationTypes;
+	}
+
+	public void setPrerequisiteAnnotationTypes(List<String> prerequisiteAnnotationTypes) {
+		this.prerequisiteAnnotationTypes = prerequisiteAnnotationTypes;
+	}
+
+	public List<String> getResultingAnnotationTypes() {
+		return resultingAnnotationTypes;
+	}
+
+	public void setResultingAnnotationTypes(List<String> resultingAnnotationTypes) {
+		this.resultingAnnotationTypes = resultingAnnotationTypes;
+	}
+
+	public List<String> getSupportedObjectTypes() {
+		return supportedObjectTypes;
+	}
+
+	public void setSupportedObjectTypes(List<String> supportedObjectTypes) {
+		this.supportedObjectTypes = supportedObjectTypes;
+	}
+
+	public List<String> getAssignedObjectTypes() {
+		return assignedObjectTypes;
+	}
+
+	public void setAssignedObjectTypes(List<String> assignedObjectTypes) {
+		this.assignedObjectTypes = assignedObjectTypes;
+	}
+
+	public List<String> getAssignedObjectCandidates() {
+		return assignedObjectCandidates;
+	}
+
+	public void setAssignedObjectCandidates(List<String> assignedObjectCandidates) {
+		this.assignedObjectCandidates = assignedObjectCandidates;
+	}
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	public String getName() {
+		return name;
+	}
+
+	public void setName(String name) {
+		this.name = name;
+	}
+
+	public String getDescription() {
+		return description;
+	}
+
+	public void setDescription(String description) {
+		this.description = description;
+	}
+
+	public String getIconUrl() {
+		return iconUrl;
+	}
+
+	public void setIconUrl(String iconURL) {
+		this.iconUrl = iconURL;
+	}
+
+	public String getLink() {
+		return link;
+	}
+
+	public void setLink(String link) {
+		this.link = link;
+	}
+
+	public DiscoveryServiceEndpoint getEndpoint() {
+		return endpoint;
+	}
+
+	public void setEndpoint(DiscoveryServiceEndpoint endpoint) {
+		this.endpoint = endpoint;
+	}
+
+	public Integer getParallelismCount() {
+		return parallelismCount;
+	}
+
+	public void setParallelismCount(Integer parallelismCount) {
+		this.parallelismCount = parallelismCount;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java
new file mode 100755
index 0000000..a922a08
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServicePropertiesList.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * JSON object representing the list of properties of all registered discovery services.
+ */
+@ApiModel(description="List of properties of registered discovery services")
+public class DiscoveryServicePropertiesList {
+	
+	@ApiModelProperty(value="List of properties of registered discovery services", readOnly=true)
+	DiscoveryServiceProperties[] items;
+
+	@ApiModelProperty(value="Number of items in the list", readOnly=true)
+	int count;
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java
new file mode 100755
index 0000000..392ca82
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRequest.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.Map;
+
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * 
+ * This class represents an analysis request that is passed from ODF to the service.
+ *
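+ * Within a service implementation, the data set under analysis is accessed
+ * through the contained data set container, e.g.:
+ * <pre>
+ * MetaDataObject dataSet = request.getDataSetContainer().getDataSet();
+ * </pre>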
+ */
+@ApiModel(description="Request for running a single discovery service.")
+public class DiscoveryServiceRequest {
+	/**
+	 * The discovery service identifier
+	 */
+	@ApiModelProperty(value="Id string of the discovery service to be issued", required=true)
+	private String discoveryServiceId;
+	/**
+	 * This property can be used by a user to pass additional information from the analysis request to the service execution
+	 */
+	@ApiModelProperty(value="Optional additional properties to be passed to the discovery service", required=false)
+	private Map<String, Object> additionalProperties;
+
+	@ApiModelProperty(value="User id under which the discovery service is supposed to run", required=true)
+	private String user;
+	/**
+	 * This property contains information about the data that is supposed to be analyzed
+	 */
+	@ApiModelProperty(value="Data set to be analyzed along with cached metadata objects", required=true)
+	private DataSetContainer dataSetContainer;
+
+	@ApiModelProperty(value="Unique id of the analysis request to which the discovery service request belongs to", required=true)
+	private String odfRequestId;
+
+	@ApiModelProperty(value="URL of ODF admin API for remote access to metadata", required=false)
+	private String odfUrl;
+
+	@ApiModelProperty(value="ODF user id for remote access to metadata", required=false)
+	private String odfUser;
+
+	@ApiModelProperty(value="ODF password for remote access to metadata", required=false)
+	private String odfPassword;
+	/**
+	 * Timestamp when the request was put on the ODF request queue
+	 */
+	@ApiModelProperty(value="Timestamp when the request was put on ODF request queue", required=true)
+	private long putOnRequestQueue;
+	/**
+	 * Timestamp when the request was taken from the queue and execution was started
+	 */
+	@ApiModelProperty(value="Timestamp when the execution was started", required=true)
+	private long takenFromRequestQueue;
+	/**
+	 * Timestamp when the request was processed successfully
+	 */
+	@ApiModelProperty(value="Timestamp when processing was finished", required=true)
+	private long finishedProcessing;
+	/**
+	 * Duration needed for storing the analysis results
+	 */
+	@ApiModelProperty(value="Time needed for storing results in metadata repository", required=true)
+	private long timeSpentStoringResults;
+
+	public String getDiscoveryServiceId() {
+		return discoveryServiceId;
+	}
+
+	public void setDiscoveryServiceId(String discoveryServiceId) {
+		this.discoveryServiceId = discoveryServiceId;
+	}
+
+	public Map<String, Object> getAdditionalProperties() {
+		return additionalProperties;
+	}
+
+	public void setAdditionalProperties(Map<String, Object> additionalProperties) {
+		this.additionalProperties = additionalProperties;
+	}
+
+	public String getUser() {
+		return user;
+	}
+
+	public void setUser(String user) {
+		this.user = user;
+	}
+
+	public DataSetContainer getDataSetContainer() {
+		return dataSetContainer;
+	}
+
+	public void setDataSetContainer(DataSetContainer dataSet) {
+		this.dataSetContainer = dataSet;
+	}
+
+	public String getOdfRequestId() {
+		return odfRequestId;
+	}
+
+	public void setOdfRequestId(String odfRequestId) {
+		this.odfRequestId = odfRequestId;
+	}
+
+	public String getOdfUrl() {
+		return odfUrl;
+	}
+
+	public void setOdfUrl(String odfUrl) {
+		this.odfUrl = odfUrl;
+	}
+
+	public String getOdfUser() {
+		return this.odfUser;
+	}
+
+	public void setOdfUser(String odfUser) {
+		this.odfUser = odfUser;
+	}
+
+	public String getOdfPassword() {
+		return this.odfPassword;
+	}
+
+	public void setOdfPassword(String odfPassword) {
+		this.odfPassword = odfPassword;
+	}
+
+	public long getFinishedProcessing() {
+		return finishedProcessing;
+	}
+
+	public void setFinishedProcessing(long finishedProcessing) {
+		this.finishedProcessing = finishedProcessing;
+	}
+
+	public long getTakenFromRequestQueue() {
+		return takenFromRequestQueue;
+	}
+
+	public void setTakenFromRequestQueue(long takenFromRequestQueue) {
+		this.takenFromRequestQueue = takenFromRequestQueue;
+	}
+
+	public long getPutOnRequestQueue() {
+		return putOnRequestQueue;
+	}
+
+	public void setPutOnRequestQueue(long putOnRequestQueue) {
+		this.putOnRequestQueue = putOnRequestQueue;
+	}
+
+	public long getTimeSpentStoringResults() {
+		return timeSpentStoringResults;
+	}
+
+	public void setTimeSpentStoringResults(long timeSpentStoringResults) {
+		this.timeSpentStoringResults = timeSpentStoringResults;
+	}
+
+}
+
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java
new file mode 100755
index 0000000..8208744
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResponse.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import com.fasterxml.jackson.annotation.JsonSubTypes;
+import com.fasterxml.jackson.annotation.JsonSubTypes.Type;
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
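+/**
+ * Common base class of discovery service responses. The concrete subtype is
+ * chosen through the JSON "type" discriminator, e.g. a synchronous response is
+ * serialized as (details value illustrative):
+ * <pre>
+ * { "type": "sync", "code": "OK", "details": "Analysis finished" }
+ * </pre>
+ */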
+@JsonTypeInfo(
+	use = JsonTypeInfo.Id.NAME,
+	include = JsonTypeInfo.As.PROPERTY,
+	property = "type")
+@JsonSubTypes({
+	@Type(value = DiscoveryServiceAsyncStartResponse.class, name = "async"),
+	@Type(value = DiscoveryServiceSyncResponse.class, name = "sync") })
+@ApiModel(description="Response returned by the discovery service.", subTypes={DiscoveryServiceAsyncStartResponse.class,DiscoveryServiceSyncResponse.class}, discriminator="type")
+public abstract class DiscoveryServiceResponse {
+	public static enum ResponseCode {
+		OK, NOT_AUTHORIZED, TEMPORARILY_UNAVAILABLE, UNKNOWN_ERROR
+	};
+
+	@ApiModelProperty(value="Response code indicating whether the discovery service request was issued successfully", readOnly=true, required=true)
+	private ResponseCode code;
+
+	@ApiModelProperty(value="Detailed status of the analysis request", readOnly=true, required=false)
+	private String details;
+
+	public ResponseCode getCode() {
+		return code;
+	}
+
+	public void setCode(ResponseCode code) {
+		this.code = code;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java
new file mode 100755
index 0000000..5c7fff9
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceResult.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * 
+ * This class must be returned by a service so that ODF can store the results.
+ * 
+ */
+@ApiModel(description="Results of a discovery service run.")
+public class DiscoveryServiceResult {
+
+	/**
+	 * The actual results of the service execution
+	 */
+	@ApiModelProperty(value="List of annotations generated by the discovery service run (following the format of the annotationPrototypes)", readOnly=true)
+	private List<Annotation> annotations;
+
+	public List<Annotation> getAnnotations() {
+		return annotations;
+	}
+
+	public void setAnnotations(List<Annotation> annotations) {
+		this.annotations = annotations;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java
new file mode 100755
index 0000000..15127e3
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceRuntimeStatistics.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * JSON object representing runtime statistics of a discovery service.
+ */
+@ApiModel(description="Runtime statistics of a discovery service")
+public class DiscoveryServiceRuntimeStatistics {
+	
+	// TODO: placeholder for things to add
+	@ApiModelProperty(value="Average processing time per item (in milliseconds)", readOnly=true)
+	long averageProcessingTimePerItemInMillis;
+
+	public long getAverageProcessingTimePerItemInMillis() {
+		return averageProcessingTimePerItemInMillis;
+	}
+
+	public void setAverageProcessingTimePerItemInMillis(long averageProcessingTimePerItemInMillis) {
+		this.averageProcessingTimePerItemInMillis = averageProcessingTimePerItemInMillis;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java
new file mode 100755
index 0000000..d377947
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceSparkEndpoint.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+// JSON
+/**
+ * This class describes the endpoint of a remote Spark discovery service that can be used by ODF.
+ * Note: It does not inherit from DiscoveryServiceEndpoint; to convert between the two classes, use JSONUtils.convert().
+ *
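+ * A JSON representation looks like this (jar location and class name are
+ * illustrative):
+ * <pre>
+ * { "runtimeName": "Spark", "jar": "file:///tmp/my-spark-service.jar",
+ *   "className": "org.example.MySparkDiscoveryService", "inputMethod": "DataFrame" }
+ * </pre>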
+ */
+public class DiscoveryServiceSparkEndpoint {
+	/**
+	 * This property informs ODF about the type of input expected by the underlying Spark job, e.g. a (CSV) file vs. a (database) connection.
+	 */
+	public static enum SERVICE_INTERFACE_TYPE {
+		DataFrame, Generic
+	}
+
+	public static final String ANNOTATION_PROPERTY_COLUMN_NAME = "ODF_ANNOTATED_COLUMN";
+	public static final String ANNOTATION_SUMMARY_COLUMN_NAME = "ODF_ANNOTATION_SUMMARY";
+	public static final String ODF_BEGIN_OF_ANNOTATION_RESULTS = "***ODF_BEGIN_OF_ANNOTATION_RESULTS***\n";
+
+	private String runtimeName;
+
+	private SERVICE_INTERFACE_TYPE inputMethod = null;
+
+	private String jar;
+
+	private String className;
+
+	public DiscoveryServiceSparkEndpoint() {
+		this.setRuntimeName("Spark");
+	}
+	
+	public String getJar() {
+		return jar;
+	}
+
+	public void setJar(String jar) {
+		this.jar = jar;
+	}
+
+	public String getClassName() {
+		return className;
+	}
+
+	public void setClassName(String className) {
+		this.className = className;
+	}
+
+	public SERVICE_INTERFACE_TYPE getInputMethod() {
+		return inputMethod;
+	}
+
+	public void setInputMethod(SERVICE_INTERFACE_TYPE inputMethod) {
+		this.inputMethod = inputMethod;
+	}
+
+	public String getRuntimeName() {
+		return runtimeName;
+	}
+
+	public void setRuntimeName(String runtimeName) {
+		this.runtimeName = runtimeName;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java
new file mode 100755
index 0000000..f263a9e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/DiscoveryServiceStatus.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of a discovery service")
+public class DiscoveryServiceStatus {
+	public static enum Status {
+		OK, ERROR
+	};
+
+	/**
+	 * JSON object representing the status of a discovery service.
+	 */
+
+	@ApiModelProperty(value="Status of the ODF service", allowableValues="OK,ERROR", readOnly=true, required=true)
+	Status status;
+
+	@ApiModelProperty(value="Status message", readOnly=true, required=true)
+	String message;
+	
+	@ApiModelProperty(value="Status count of the discovery service", readOnly=true, required=true)
+	ServiceStatusCount statusCount;
+
+	public Status getStatus() {
+		return status;
+	}
+
+	public void setStatus(Status status) {
+		this.status = status;
+	}
+
+	public String getMessage() {
+		return message;
+	}
+
+	public void setMessage(String message) {
+		this.message = message;
+	}
+
+	public ServiceStatusCount getStatusCount() {
+		return statusCount;
+	}
+
+	public void setStatusCount(ServiceStatusCount statusCount) {
+		this.statusCount = statusCount;
+	}
+	
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java
new file mode 100755
index 0000000..c320bf6
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceNotFoundException.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.text.MessageFormat;
+
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+public class ServiceNotFoundException extends ValidationException {
+
+	private static final long serialVersionUID = 1L;
+	private String serviceId;
+	
+	public ServiceNotFoundException(String serviceId) {
+		super("Service not found");
+		this.serviceId = serviceId;
+	}
+
+	@Override
+	public String getMessage() {
+		return MessageFormat.format("Discovery service with id {0} is not registered", serviceId);
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java
new file mode 100755
index 0000000..85ba444
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/ServiceStatusCount.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of a discovery service.")
+public class ServiceStatusCount {
+	@ApiModelProperty(value="Id string of the discovery service", readOnly=true, required=true)
+	private String id;
+
+	@ApiModelProperty(value="Descriptive name of the discovery service", readOnly=true, required=true)
+	private String name;
+
+	@ApiModelProperty(value="Status of the discovery service", readOnly=true)
+	private Map<STATUS, Integer> statusCountMap = new HashMap<STATUS, Integer>();
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	public Map<STATUS, Integer> getStatusCountMap() {
+		return statusCountMap;
+	}
+
+	public void setStatusCountMap(Map<STATUS, Integer> statusCountMap) {
+		this.statusCountMap = statusCountMap;
+	}
+
+	public String getName() {
+		return name;
+	}
+
+	public void setName(String name) {
+		this.name = name;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java
new file mode 100755
index 0000000..ef6666e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/SyncDiscoveryServiceBase.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+
+/**
+ * This is an abstract class to extend when creating a synchronous discovery service.
+ *
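+ * A minimal implementation sketch (the service class and details message are
+ * illustrative, not part of this API):
+ * <pre>
+ * public class MySampleService extends SyncDiscoveryServiceBase {
+ *     public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+ *         // A real service would analyze request.getDataSetContainer().getDataSet()
+ *         // and pass the resulting annotations instead of null.
+ *         return createSyncResponse(ResponseCode.OK, "Analysis finished", null);
+ *     }
+ * }
+ * </pre>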
+ */
+public abstract class SyncDiscoveryServiceBase extends DiscoveryServiceBase implements SyncDiscoveryService {
+	
+	/**
+	 * Convenience method for building the response of a synchronous service run.
+	 */
+	protected DiscoveryServiceSyncResponse createSyncResponse(ResponseCode code, String detailsMessage, List<? extends Annotation> annotations) {
+		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+		response.setCode(code);
+		response.setDetails(detailsMessage);
+		DiscoveryServiceResult result = new DiscoveryServiceResult();
+		if (annotations != null) {
+			// Copy the annotations instead of using an unchecked cast to List<Annotation>
+			result.setAnnotations(new ArrayList<Annotation>(annotations));
+		}
+		response.setResult(result);
+		return response;
+	}
+	
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java
new file mode 100755
index 0000000..8a98a6b
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/AsyncDiscoveryService.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.async;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+
+
+/**
+ * An asynchronous discovery service must implement this interface
+ *
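+ * ODF starts the analysis via {@link #startAnalysis(DiscoveryServiceRequest)}
+ * and then polls {@link #getStatus(String)} with the returned run id, e.g.
+ * (variable names are illustrative):
+ * <pre>
+ * DiscoveryServiceAsyncStartResponse startResponse = service.startAnalysis(request);
+ * DiscoveryServiceAsyncRunStatus status = service.getStatus(startResponse.getRunId());
+ * </pre>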
+ */
+public interface AsyncDiscoveryService extends DiscoveryService {
+	DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request);
+
+	DiscoveryServiceAsyncRunStatus getStatus(String runId);
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java
new file mode 100755
index 0000000..49b50ca
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncRunStatus.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.async;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * 
+ * An object of this class must be returned when ODF requests the status of an analysis run.
+ *
+ */
+@ApiModel(description="Status of an asynchronous discovery service run.")
+public class DiscoveryServiceAsyncRunStatus {
+	public static enum State {
+		RUNNING, ERROR, NOT_FOUND, FINISHED
+	}
+
+	@ApiModelProperty(value="Id of the discovery service run", readOnly=true, required=true)
+	private String runId;
+
+	@ApiModelProperty(value="Status of the discovery service run", readOnly=true, required=true)
+	private State state;
+
+	@ApiModelProperty(value="Optional status message", readOnly=true)
+	private String details;
+
+	@ApiModelProperty(value="Result of the discovery service run (if already available)", readOnly=true)
+	private DiscoveryServiceResult result;
+
+	public String getRunId() {
+		return runId;
+	}
+
+	public void setRunId(String runId) {
+		this.runId = runId;
+	}
+
+	public State getState() {
+		return state;
+	}
+
+	public void setState(State state) {
+		this.state = state;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+	public DiscoveryServiceResult getResult() {
+		return result;
+	}
+
+	public void setResult(DiscoveryServiceResult result) {
+		this.result = result;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java
new file mode 100755
index 0000000..5d6027d
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/async/DiscoveryServiceAsyncStartResponse.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.async;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * An object of this class must be returned by an asynchronous service after starting
+ *
+ */
+@ApiModel(description="Response returned by an asynchronous discovery service.")
+public class DiscoveryServiceAsyncStartResponse extends DiscoveryServiceResponse {
+	/**
+	 * Property identifying the running analysis. This id will be used to repeatedly request the status of the analysis.
+	 */
+	@ApiModelProperty(value="Id of the analysis request (asynchronous requests only)", readOnly=true, required=true)
+	private String runId;
+
+	public String getRunId() {
+		return runId;
+	}
+
+	public void setRunId(String runId) {
+		this.runId = runId;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java
new file mode 100755
index 0000000..ed57357
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/DataSetContainer.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.datasets;
+
+import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * This class holds a reference to the metadata object (data set) to be
+ * analyzed, optionally together with a metadata cache.
+ */
+@ApiModel(description="Container keeping reference to data set along with cached metadata objects.")
+public class DataSetContainer {
+
+	@ApiModelProperty(value="Reference to the data set to be analyzed", required=true)
+	private MetaDataObject oMDataSet;
+
+	@ApiModelProperty(value="A Metadata cache that may be used by discovery services if access to the metadata store is not available", required=false)
+	private MetaDataCache metaDataCache;
+
+	public MetaDataObject getDataSet() {
+		return oMDataSet;
+	}
+
+	public void setDataSet(MetaDataObject oMDataSet) {
+		this.oMDataSet = oMDataSet;
+	}
+
+	public MetaDataCache getMetaDataCache() {
+		return metaDataCache;
+	}
+
+	public void setMetaDataCache(MetaDataCache metaDataCache) {
+		this.metaDataCache = metaDataCache;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java
new file mode 100755
index 0000000..a00c4eb
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/datasets/MaterializedDataSet.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.datasets;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+
+// JSON
+/**
+ * This class represents the materialized contents of a data set
+ *
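+ * A single cell value is addressed by row and column index, e.g. (variable
+ * names are illustrative):
+ * <pre>
+ * Object cell = materializedDataSet.getData().get(rowIndex).get(columnIndex);
+ * </pre>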
+ */
+public class MaterializedDataSet {
+	private RelationalDataSet table;
+	private List<Column> oMColumns;
+
+	// Row data: one inner list per row, with values in the same order as oMColumns
+	private List<List<Object>> data;
+
+	public List<Column> getColumns() {
+		return oMColumns;
+	}
+
+	public void setColumns(List<Column> oMColumns) {
+		this.oMColumns = oMColumns;
+	}
+
+	public RelationalDataSet getTable() {
+		return table;
+	}
+
+	public void setTable(RelationalDataSet table) {
+		this.table = table;
+	}
+
+	public List<List<Object>> getData() {
+		return data;
+	}
+
+	public void setData(List<List<Object>> data) {
+		this.data = data;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java
new file mode 100755
index 0000000..b5b69f4
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/DiscoveryServiceSyncResponse.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.sync;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * An object of this class must be returned by a synchronous discovery service
+ *
+ */
+@ApiModel(description="Response returned by a synchronous discovery service.")
+public class DiscoveryServiceSyncResponse extends DiscoveryServiceResponse {
+	@ApiModelProperty(value="Result of the analysis (synchronous requests only)", readOnly=true, required=true)
+	private DiscoveryServiceResult result;
+
+	public DiscoveryServiceResult getResult() {
+		return result;
+	}
+
+	public void setResult(DiscoveryServiceResult result) {
+		this.result = result;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java
new file mode 100755
index 0000000..626d78c
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/discoveryservice/sync/SyncDiscoveryService.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.discoveryservice.sync;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+
+/**
+ * 
+ * Synchronous discovery services must implement this interface
+ *
+ */
+public interface SyncDiscoveryService extends DiscoveryService {
+
+    /**
+     * Runs the actual discovery service.
+     * 
+     * @param request Request parameter that includes a reference to the data set to be analyzed
+     * @return Response object that includes the annotations to be created along with status information
+     */
+	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request);
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java
new file mode 100755
index 0000000..0805b30
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/BrokerNode.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Kafka broker node details")
+public class BrokerNode {
+	@ApiModelProperty(value="Kafka broker identifier", readOnly=true, required=true)
+	private String host;
+
+	@ApiModelProperty(value="Indicates whether the broker is the leader of the partition", readOnly=true, required=true)
+	private boolean isLeader;
+
+	public boolean isLeader() {
+		return isLeader;
+	}
+
+	public void setLeader(boolean isLeader) {
+		this.isLeader = isLeader;
+	}
+
+	public String getHost() {
+		return host;
+	}
+
+	public void setHost(String host) {
+		this.host = host;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java
new file mode 100755
index 0000000..4c441a9
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/EngineManager.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.List;
+
+/**
+ *
+ * External Java API for managing and controlling the ODF engine.
+ *
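+ * A typical health check sketch (how the engineManager instance is obtained
+ * is an assumption and depends on the ODF setup):
+ * <pre>
+ * SystemHealth health = engineManager.checkHealthStatus();
+ * ODFVersion version = engineManager.getVersion();
+ * </pre>
+ *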
+ */
+public interface EngineManager {
+
+	/**
+	 * Checks the health status of ODF
+	 *
+	 * @return Health status of the ODF engine
+	 */
+	public SystemHealth checkHealthStatus();
+
+	/**
+	 * Get information about all available service runtimes.
+	 *
+	 * @return Runtimes info
+	 */
+	public ServiceRuntimesInfo getRuntimesInfo();
+
+	/**
+	 * Returns the status of the ODF thread manager
+	 *
+	 * @return Status of all threads making up the ODF thread manager
+	 */
+	public List<ThreadStatus> getThreadManagerStatus();
+
+	/**
+	 * Returns the status of the ODF messaging subsystem
+	 *
+	 * @return Status of the ODF messaging subsystem
+	 */
+	public MessagingStatus getMessagingStatus();
+
+	/**
+	 * Returns the status of the messaging subsystem and the internal thread manager
+	 *
+	 * @return Combined status of the messaging subsystem and the internal thread manager
+	 */
+	public ODFStatus getStatus();
+
+	/**
+	 * Returns the current ODF version
+	 *
+	 * @return ODF version identifier
+	 */
+	public ODFVersion getVersion();
+
+	/**
+	 * Shuts down the ODF engine, purges all scheduled analysis requests from the queues, and cancels all running analysis requests.
+	 * Running jobs are either cancelled or their results are not reported back.
+	 * (For debugging purposes only.)
+	 *
+	 * @param options Option for immediately restarting the engine after shutdown (default is not to restart immediately but only when needed)
+	 */
+	public void shutdown(ODFEngineOptions options);
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java
new file mode 100755
index 0000000..fdd84af
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaBrokerPartitionMessageCountInfo.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.HashMap;
+import java.util.Map;
+
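+/**
+ * Number of messages per partition for a single Kafka broker.
+ */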
+public class KafkaBrokerPartitionMessageCountInfo {
+
+	private String broker;
+	private Map<Integer, Long> partitionMsgCountMap = new HashMap<Integer, Long>();
+
+	public String getBroker() {
+		return broker;
+	}
+
+	public void setBroker(String broker) {
+		this.broker = broker;
+	}
+
+	public Map<Integer, Long> getPartitionMsgCountMap() {
+		return partitionMsgCountMap;
+	}
+
+	public void setPartitionMsgCountMap(Map<Integer, Long> partitionMsgCountMap) {
+		this.partitionMsgCountMap = partitionMsgCountMap;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java
new file mode 100755
index 0000000..5f6e4f8
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaGroupOffsetInfo.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Information on Kafka offsets per group id")
+public class KafkaGroupOffsetInfo {
+	@ApiModelProperty(value="Kafka group id", readOnly=true, required=true)
+	private String groupId;
+
+	@ApiModelProperty(value="List of Kafka offsets", readOnly=true, required=true)
+	private List<PartitionOffsetInfo> offsets = new ArrayList<PartitionOffsetInfo>();
+
+	public String getGroupId() {
+		return groupId;
+	}
+
+	public void setGroupId(String groupId) {
+		this.groupId = groupId;
+	}
+
+	public List<PartitionOffsetInfo> getOffsets() {
+		return offsets;
+	}
+
+	public void setOffsets(List<PartitionOffsetInfo> offsets) {
+		this.offsets = offsets;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java
new file mode 100755
index 0000000..8ab8f15
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaPartitionInfo.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Kafka nodes belonging to a specific partition")
+public class KafkaPartitionInfo {
+	@ApiModelProperty(value="Partition id", readOnly=true, required=true)
+	private Integer partitionId;
+
+	@ApiModelProperty(value="List of nodes containing this partition", readOnly=true, required=true)
+	private List<BrokerNode> nodes;
+
+	public List<BrokerNode> getNodes() {
+		return nodes;
+	}
+
+	public void setNodes(List<BrokerNode> nodes) {
+		this.nodes = nodes;
+	}
+
+	public Integer getPartitionId() {
+		return partitionId;
+	}
+
+	public void setPartitionId(Integer partitionId) {
+		this.partitionId = partitionId;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java
new file mode 100755
index 0000000..10ff1a5
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaStatus.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of the Kafka ODF queues")
+public class KafkaStatus extends MessagingStatus {
+	@ApiModelProperty(value="List of message brokers", readOnly=true)
+	private List<String> brokers = new ArrayList<String>();
+
+	@ApiModelProperty(value="Status of the individual topics", readOnly=true)
+	private List<KafkaTopicStatus> topicStatus = new ArrayList<KafkaTopicStatus>();
+
+	public List<String> getBrokers() {
+		return brokers;
+	}
+
+	public void setBrokers(List<String> brokers) {
+		this.brokers = brokers;
+	}
+
+	public List<KafkaTopicStatus> getTopicStatus() {
+		return topicStatus;
+	}
+
+	public void setTopicStatus(List<KafkaTopicStatus> topicStatus) {
+		this.topicStatus = topicStatus;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java
new file mode 100755
index 0000000..7e41939
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/KafkaTopicStatus.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of an individual Kafka topic")
+public class KafkaTopicStatus {
+	
+	@ApiModelProperty(value="Kafka topic", readOnly=true, required=true)
+	private String topic;
+
+	@ApiModelProperty(value="Information on Kafka offsets per group id (can be used by the admin to track how many messages are still waiting to be consumed)", readOnly=true, required=true)
+	private List<KafkaGroupOffsetInfo> consumerGroupOffsetInfo = new ArrayList<KafkaGroupOffsetInfo>();
+
+	@ApiModelProperty(value="List of Kafka partitions and the nodes they belong to", readOnly=true, required=true)
+	private List<KafkaPartitionInfo> partitionBrokersInfo = new ArrayList<KafkaPartitionInfo>();
+
+	@ApiModelProperty(value="Message counts of individual brokers", readOnly=true, required=true)
+	private List<KafkaBrokerPartitionMessageCountInfo> brokerPartitionMessageCountInfo = new ArrayList<KafkaBrokerPartitionMessageCountInfo>();
+
+	public String getTopic() {
+		return topic;
+	}
+
+	public void setTopic(String topic) {
+		this.topic = topic;
+	}
+
+	public List<KafkaGroupOffsetInfo> getConsumerGroupOffsetInfo() {
+		return consumerGroupOffsetInfo;
+	}
+
+	public void setConsumerGroupOffsetInfo(List<KafkaGroupOffsetInfo> offsetInfoList) {
+		this.consumerGroupOffsetInfo = offsetInfoList;
+	}
+
+	public List<KafkaPartitionInfo> getPartitionBrokersInfo() {
+		return partitionBrokersInfo;
+	}
+
+	public void setPartitionBrokersInfo(List<KafkaPartitionInfo> partitionBrokersMap) {
+		this.partitionBrokersInfo = partitionBrokersMap;
+	}
+
+	public List<KafkaBrokerPartitionMessageCountInfo> getBrokerPartitionMessageInfo() {
+		return brokerPartitionMessageCountInfo;
+	}
+
+	public void setBrokerPartitionMessageInfo(List<KafkaBrokerPartitionMessageCountInfo> brokerInfo) {
+		this.brokerPartitionMessageCountInfo = brokerInfo;
+	}
+
+}
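
Taken together, the per-group offsets and the per-broker message counts above allow an admin to estimate consumer lag. A sketch (not part of the patch) under the assumptions that each partition is reported by exactly one broker and that the stored offset counts messages already consumed:

    import java.util.HashMap;
    import java.util.Map;

    public class KafkaLagEstimator {
    	// Sketch: estimate how many messages the given consumer group still has to consume for one topic.
    	public static long estimateLag(KafkaTopicStatus status, String groupId) {
    		// Collect the total message count per partition across all brokers.
    		Map<Integer, Long> totalMessagesPerPartition = new HashMap<>();
    		for (KafkaBrokerPartitionMessageCountInfo countInfo : status.getBrokerPartitionMessageInfo()) {
    			totalMessagesPerPartition.putAll(countInfo.getPartitionMsgCountMap());
    		}
    		long lag = 0;
    		for (KafkaGroupOffsetInfo groupInfo : status.getConsumerGroupOffsetInfo()) {
    			if (groupId.equals(groupInfo.getGroupId())) {
    				for (PartitionOffsetInfo offsetInfo : groupInfo.getOffsets()) {
    					Long total = totalMessagesPerPartition.get(offsetInfo.getPartitionId());
    					if (total != null && offsetInfo.getOffset() != null) {
    						lag += Math.max(0, total - offsetInfo.getOffset());
    					}
    				}
    			}
    		}
    		return lag;
    	}
    }
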
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java
new file mode 100755
index 0000000..f3248ac
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/MessagingStatus.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel(description="Status of the ODF queues", subTypes={KafkaStatus.class})
+public class MessagingStatus {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java
new file mode 100755
index 0000000..fb3d3d6
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFEngineOptions.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="ODF startup options.")
+public class ODFEngineOptions {
+	
+	@ApiModelProperty(value="Indicates whether to explicitly restart the queues after shutting down the ODF engine (or to implicitly restart them when needed)", required=true)
+	private boolean restart = false;
+	
+	public boolean isRestart() {
+		return this.restart;
+	}
+
+	public void setRestart(boolean restart) {
+		this.restart = restart;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java
new file mode 100755
index 0000000..3ae9068
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFStatus.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Overall ODF status.")
+public class ODFStatus {
+
+	@ApiModelProperty(value="Status of the ODF queues", readOnly=true)
+	private MessagingStatus messagingStatus;
+
+	@ApiModelProperty(value="Status of the ODF thread manager", readOnly=true)
+	private List<ThreadStatus> threadManagerStatus;
+
+	public MessagingStatus getMessagingStatus() {
+		return this.messagingStatus;
+	}
+
+	public void setMessagingStatus(MessagingStatus messagingStatus) {
+		this.messagingStatus = messagingStatus;
+	}
+
+	public List<ThreadStatus> getThreadManagerStatus() {
+		return this.threadManagerStatus;
+	}
+
+	public void setThreadManagerStatus(List<ThreadStatus> threadManagerStatus) {
+		this.threadManagerStatus = threadManagerStatus;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java
new file mode 100755
index 0000000..d18825b
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ODFVersion.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="ODF version information.")
+public class ODFVersion {
+
+	@ApiModelProperty(value="Version of the ODF instance", readOnly=true, required=true)
+	private String version;
+
+	public String getVersion() {
+		return this.version;
+	}
+
+	public void setVersion(String version) {
+		this.version = version;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java
new file mode 100755
index 0000000..ccaec51
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/PartitionOffsetInfo.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of an individual Kafka offset")
+public class PartitionOffsetInfo {
+	@ApiModelProperty(value="Partition id", readOnly=true, required=true)
+	private Integer partitionId;
+
+	@ApiModelProperty(value="Kafka offset identifying the last consumed message within the partition", readOnly=true, required=true)
+	private Long offset;
+
+	@ApiModelProperty(value="Status message", readOnly=true)
+	private String message;
+
+	public Integer getPartitionId() {
+		return partitionId;
+	}
+
+	public void setPartitionId(Integer partitionId) {
+		this.partitionId = partitionId;
+	}
+
+	public Long getOffset() {
+		return offset;
+	}
+
+	public void setOffset(Long offset) {
+		this.offset = offset;
+	}
+
+	public String getMessage() {
+		return message;
+	}
+
+	public void setMessage(String message) {
+		this.message = message;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java
new file mode 100755
index 0000000..4f3e871
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimeInfo.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+public class ServiceRuntimeInfo {
+	private String name;
+	private String description;
+
+	public String getName() {
+		return name;
+	}
+
+	public void setName(String name) {
+		this.name = name;
+	}
+
+	public String getDescription() {
+		return description;
+	}
+
+	public void setDescription(String description) {
+		this.description = description;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java
new file mode 100755
index 0000000..a244127
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ServiceRuntimesInfo.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.List;
+
+public class ServiceRuntimesInfo {
+	private List<ServiceRuntimeInfo> runtimes;
+
+	public List<ServiceRuntimeInfo> getRuntimes() {
+		return runtimes;
+	}
+
+	public void setRuntimes(List<ServiceRuntimeInfo> runtimes) {
+		this.runtimes = runtimes;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java
new file mode 100755
index 0000000..b6b918b
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/SystemHealth.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Overall ODF system health.")
+public class SystemHealth {
+	
+	public static enum HealthStatus {
+		OK, WARNING, ERROR
+	}
+
+	@ApiModelProperty(value="ODF health status", readOnly=true, required=true)
+	private HealthStatus status;
+
+	@ApiModelProperty(value="List of status messages", readOnly=true)
+	private List<String> messages = new ArrayList<>();
+
+	@ApiModelProperty(value="Health status of the individual subsystems", readOnly=true)
+	private List<SystemHealth> subSystemsHealth = new ArrayList<>();
+
+	public HealthStatus getStatus() {
+		return status;
+	}
+
+	public void setStatus(HealthStatus status) {
+		this.status = status;
+	}
+
+	public List<String> getMessages() {
+		return messages;
+	}
+
+	public void setMessages(List<String> messages) {
+		this.messages = messages;
+	}
+
+	public List<SystemHealth> getSubSystemsHealth() {
+		return subSystemsHealth;
+	}
+
+	public void setSubSystemsHealth(List<SystemHealth> subSystemsHealth) {
+		this.subSystemsHealth = subSystemsHealth;
+	}
+
+}
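
Because SystemHealth nests subsystem health recursively, the effective overall status is the worst status anywhere in the tree. A small sketch (not part of the patch) that relies on the enum declaration order OK, WARNING, ERROR:

    public class HealthUtils {
    	// Sketch: the effective status is the worst status found in the health tree.
    	public static SystemHealth.HealthStatus effectiveStatus(SystemHealth health) {
    		SystemHealth.HealthStatus worst =
    				(health.getStatus() != null) ? health.getStatus() : SystemHealth.HealthStatus.OK;
    		for (SystemHealth sub : health.getSubSystemsHealth()) {
    			SystemHealth.HealthStatus subStatus = effectiveStatus(sub);
    			if (subStatus.ordinal() > worst.ordinal()) { // OK < WARNING < ERROR by declaration order
    				worst = subStatus;
    			}
    		}
    		return worst;
    	}
    }
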
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java
new file mode 100755
index 0000000..74e939e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/engine/ThreadStatus.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.engine;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Status of the ODF thread manager")
+public class ThreadStatus {
+
+	public static enum ThreadState { RUNNING, FINISHED, NON_EXISTENT }
+
+	@ApiModelProperty(value="Thread id", readOnly=true)
+	private String id;
+
+	@ApiModelProperty(value="Thread status", readOnly=true)
+	private ThreadState state;
+
+	@ApiModelProperty(value="Thread type", readOnly=true)
+	private String type;
+
+	public String getType() {
+		return type;
+	}
+
+	public void setType(String type) {
+		this.type = type;
+	}
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	public ThreadState getState() {
+		return state;
+	}
+
+	public void setState(ThreadState state) {
+		this.state = state;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java
new file mode 100755
index 0000000..1f48d0d
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AnnotationPropagator.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+
+/**
+ * Interface for the logic that propagates annotations from the AnnotationStore to the MetadataStore 
+ *
+ */
+public interface AnnotationPropagator {
+
+	/**
+	 * Run the actual propagation process 
+	 *@param as The annotation store from which the annotations should be taken from
+	 *@param requestId Propagate only annotations that belong to a specific analysis request id (optional)
+	 * 
+	 */
+	void propagateAnnotations(AnnotationStore as, String requestId);
+}
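
A skeleton of an implementation could look as follows; the concrete read and write calls are left as comments because they depend on the AnnotationStore and MetadataStore implementations at hand:

    import org.apache.atlas.odf.api.annotation.AnnotationStore;
    import org.apache.atlas.odf.api.metadata.AnnotationPropagator;

    public class ExampleAnnotationPropagator implements AnnotationPropagator {
    	@Override
    	public void propagateAnnotations(AnnotationStore as, String requestId) {
    		// 1. Read the annotations for the given request id from the annotation store "as"
    		//    (read all annotations if requestId is null).
    		// 2. Map each annotation to the corresponding object in the metadata store.
    		// 3. Write the mapped annotations through the metadata store's write API.
    	}
    }
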
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java
new file mode 100755
index 0000000..849277c
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/AtlasMetadataQueryBuilder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+public class AtlasMetadataQueryBuilder extends MetadataQueryBuilder {
+
+	@Override
+	public String build() {
+		if (this.objectType != null) {
+			StringBuilder query = new StringBuilder("from " + objectType);
+			boolean firstCondition = true;
+			if (this.conditions != null) {
+				for (Condition condition : conditions) {
+					if (condition instanceof SimpleCondition) {
+						SimpleCondition simpleCond = (SimpleCondition) condition;
+						if (firstCondition) {
+							query.append(" where ");
+						} else {
+							query.append(" and ");
+						}
+						query.append(simpleCond.getAttributeName());
+						switch (simpleCond.getComparator()) {
+						case EQUALS:
+							query.append(" = ");
+							break;
+						case NOT_EQUALS:
+							query.append(" != ");
+							break;
+						default:
+							throw new RuntimeException("Comparator " + simpleCond.getComparator() + " is currently not supported");
+						}
+						Object val = simpleCond.getValue();
+						if (val instanceof MetaDataObjectReference) {
+							query.append("'" + ((MetaDataObjectReference) val).getId() + "'");
+						} else if (val instanceof String) {
+							query.append("'" + val.toString() + "'");
+						} else if (val == null) {
+							query.append("null");
+						} else {
+							query.append(val.toString());
+						}
+					}
+					firstCondition = false;
+				}
+			}
+			return query.toString();
+		}
+		return null;
+	}
+}
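
For example, the builder above turns a simple type-plus-condition query into the Atlas DSL (COMPARATOR is defined on the abstract MetadataQueryBuilder base class later in this patch; the table name is illustrative):

    MetadataQueryBuilder builder = new AtlasMetadataQueryBuilder();
    String query = builder.objectType("Table")
    		.simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "CUSTOMERS")
    		.build();
    // query is now: from Table where name = 'CUSTOMERS'
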
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java
new file mode 100755
index 0000000..c9e59e7
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/DefaultMetadataQueryBuilder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+public class DefaultMetadataQueryBuilder extends MetadataQueryBuilder {
+
+	public static final String SEPARATOR_STRING = " ";
+	public static final String DATASET_IDENTIFIER = "from";
+	public static final String CONDITION_PREFIX = "where";
+	public static final String AND_IDENTIFIER = "and";
+	public static final String EQUALS_IDENTIFIER = "=";
+	public static final String NOT_EQUALS_IDENTIFIER = "<>";
+	public static final String QUOTE_IDENTIFIER = "'";
+
+	@Override
+	public String build() {
+		if (this.objectType != null) {
+			StringBuilder query = new StringBuilder(DATASET_IDENTIFIER + SEPARATOR_STRING + objectType);
+			if (this.conditions != null) {
+				boolean firstCondition = true;
+				for (Condition condition : conditions) {
+					if (condition instanceof SimpleCondition) {
+						SimpleCondition simpleCond = (SimpleCondition) condition;
+						// The first condition is introduced by "where", all subsequent ones by "and".
+						if (firstCondition) {
+							query.append(SEPARATOR_STRING + CONDITION_PREFIX + SEPARATOR_STRING);
+						} else {
+							query.append(SEPARATOR_STRING + AND_IDENTIFIER + SEPARATOR_STRING);
+						}
+						query.append(simpleCond.getAttributeName());
+						switch (simpleCond.getComparator()) {
+						case EQUALS:
+							query.append(SEPARATOR_STRING + EQUALS_IDENTIFIER + SEPARATOR_STRING);
+							break;
+						case NOT_EQUALS:
+							query.append(SEPARATOR_STRING + NOT_EQUALS_IDENTIFIER + SEPARATOR_STRING);
+							break;
+						default:
+							throw new RuntimeException("Comparator " + simpleCond.getComparator() + " is currently not supported");
+						}
+						Object val = simpleCond.getValue();
+						if (val instanceof MetaDataObjectReference) {
+							query.append(QUOTE_IDENTIFIER + ((MetaDataObjectReference) val).getId() + QUOTE_IDENTIFIER);
+						} else if (val instanceof String) {
+							query.append(QUOTE_IDENTIFIER + val.toString() + QUOTE_IDENTIFIER);
+						} else if (val == null) {
+							query.append("null");
+						} else {
+							query.append(val.toString());
+						}
+					}
+					firstCondition = false;
+				}
+			}
+			return query.toString();
+		}
+		return null;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java
new file mode 100755
index 0000000..41ad9e1
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ExternalStore.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.Properties;
+
+/**
+ * A common interface for stores that are external to ODF.
+ * Provides connection test methods and basic metadata about the store. 
+ *  
+ *
+ */
+public interface ExternalStore {
+	static enum ConnectionStatus { OK, AUTHORIZATION_FAILED, UNREACHABLE, UNKNOWN_ERROR };
+	
+	static final String STORE_PROPERTY_DESCRIPTION = "STORE_PROPERTY_DESCRIPTION"; 
+	static final String STORE_PROPERTY_TYPE = "STORE_PROPERTY_TYPE"; 
+	static final String STORE_PROPERTY_ID = "STORE_PROPERTY_ID"; 
+	
+	/**
+	 * @return the properties of this metadata object store instance.
+	 * Must return at least STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_TYPE, and STORE_PROPERTY_ID.
+	 */
+	Properties getProperties();
+	
+	/**
+	 * @return the unique repository Id for this metadata store
+	 */
+	String getRepositoryId();
+	
+	ConnectionStatus testConnection();
+	
+}
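
A typical client-side check against this interface could look as follows (a sketch; the calling context is assumed):

    static void checkStore(ExternalStore store) {
    	// Fail fast if the store cannot be used.
    	ExternalStore.ConnectionStatus status = store.testConnection();
    	if (status != ExternalStore.ConnectionStatus.OK) {
    		throw new IllegalStateException("Store " + store.getRepositoryId() + " is not usable: " + status);
    	}
    }
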
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java
new file mode 100755
index 0000000..7eca5cb
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetaDataUtils.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+
+/**
+ * Internal metadata utilities
+ * 
+ */
+public class InternalMetaDataUtils {
+	public static final String ODF_PARENT_REFERENCE = "PARENT";
+	public static final String ODF_CHILDREN_REFERENCE = "CHILDREN";
+
+	/**
+	 * Turn a list of metadata objects into a list of references to the corresponding metadata objects
+	 *  
+	 * @param objectList Given list of metadata objects
+	 * @return Resulting list of references to the metadata objects
+	 */
+	public static List<MetaDataObjectReference> getReferenceList(List<MetaDataObject> objectList) {
+		List<MetaDataObjectReference> result = new ArrayList<MetaDataObjectReference>();
+		for (MetaDataObject obj : objectList) {
+			result.add(obj.getReference());
+		}
+		return result;
+	}
+
+	/**
+	 * Convert a list of metadata object references into a list of the corresponding metadata objects.
+	 *  
+	 * @param mds Metadata store to retrieve the objects from
+	 * @param referenceList Given list of metadata object references
+	 * @param type Expected type of the retrieved metadata objects
+	 * @return Resulting list of metadata objects
+	 */
+	public static <T> List<T> getObjectList(MetadataStore mds, List<MetaDataObjectReference> referenceList, Class<T> type) {
+		List<T> result = new ArrayList<T>();
+		for (MetaDataObjectReference ref : referenceList) {
+			MetaDataObject obj = mds.retrieve(ref);
+			if (obj != null) {
+				try {
+					result.add(type.cast(obj));
+				} catch(ClassCastException e) {
+					String errorMessage = MessageFormat.format("Metadata object with id ''{0}'' cannot be cast to type ''{1}''.", new Object[] { ref.getId(), type.getName() });
+					throw new MetadataStoreException(errorMessage);
+				}
+			} else {
+				String errorMessage = MessageFormat.format("Metadata object with reference ''{0}'' could not be retrieved from metadata store ''{1}''.", new Object[] { ref, mds.getRepositoryId() });
+				throw new MetadataStoreException(errorMessage);
+			}
+		}
+		return result;
+	}
+
+	/**
+	 * Merge a set of given lists of references to metadata objects into a single list without duplicates.
+	 *  
+	 * @param refListArray Array of given lists of references
+	 * @return Resulting merged list of references
+	 */
+	@SafeVarargs
+	public static List<MetaDataObjectReference> mergeReferenceLists(List<MetaDataObjectReference>... refListArray) {
+		HashMap<String, MetaDataObjectReference> referenceHashMap = new HashMap<String, MetaDataObjectReference>();
+		for (List<MetaDataObjectReference> refList : refListArray) {
+			if (refList != null) {
+				for (MetaDataObjectReference ref : refList) {
+					referenceHashMap.put(ref.getId(), ref);
+				}
+			}
+		}
+		return new ArrayList<MetaDataObjectReference>(referenceHashMap.values());
+	}
+}
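
Usage sketch for the two utilities above; mds, parentRefs, and childRefs are assumed to be an available MetadataStore and two reference lists:

    List<MetaDataObjectReference> merged = InternalMetaDataUtils.mergeReferenceLists(parentRefs, childRefs);
    List<MetaDataObject> objects = InternalMetaDataUtils.getObjectList(mds, merged, MetaDataObject.class);
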
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java
new file mode 100755
index 0000000..e5ebfda
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InternalMetadataStoreBase.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+
+/**
+ * Common base for default metadata store and metadata cache.
+ * 
+ * 
+ */
+public abstract class InternalMetadataStoreBase extends MetadataStoreBase implements MetadataStore {
+
+	protected abstract HashMap<String, StoredMetaDataObject> getObjects();
+
+	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type) {
+		if ((metaDataObject == null) || (metaDataObject.getReference() == null)) {
+			throw new MetadataStoreException("Metadata object or its reference attribute cannot be null.");
+		}
+		List<T> result = new ArrayList<T>();
+		StoredMetaDataObject internalObj = getObjects().get(metaDataObject.getReference().getId());
+		if ((internalObj != null) && (internalObj.getReferenceMap().get(attributeName) != null)) {
+			for (MetaDataObjectReference ref : internalObj.getReferenceMap().get(attributeName)) {
+				MetaDataObject obj = retrieve(ref);
+				if (obj != null) {
+					// Ignore objects that are not available in the metadata store
+					// TODO: Consider using an invalid reference if an object is not available
+					try {
+						result.add(type.cast(obj));
+					} catch(ClassCastException e) {
+						String errorMessage = MessageFormat.format("Inconsistent object reference: A reference of type ''{0}'' cannot be cast to type ''{1}''.", new Object[] { attributeName, type.getName() });
+						throw new MetadataStoreException(errorMessage);
+					}
+				}
+			}
+		}
+		return result;
+	}
+
+	abstract protected Object getAccessLock();
+
+	@Override
+	public MetaDataObject getParent(MetaDataObject metaDataObject) {
+		List<MetaDataObject> parentList = new ArrayList<MetaDataObject>();
+		// TODO: Make this more efficient
+		for (StoredMetaDataObject internalMdo : getObjects().values()) {
+			for (MetaDataObject child : getChildren(internalMdo.getMetaDataObject())) {
+				if (child.getReference().getId().equals(metaDataObject.getReference().getId())) {
+					parentList.add(internalMdo.getMetaDataObject());
+				}
+			}
+		}
+		if (parentList.size() == 1) {
+			return parentList.get(0);
+		} else if (parentList.size() == 0) {
+			return null;
+		}
+		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more that one parent object.", metaDataObject.getReference().getId());
+		throw new MetadataStoreException(errorMessage);
+	}
+
+	@Override
+	public MetaDataObject retrieve(MetaDataObjectReference reference) {
+		synchronized(getAccessLock()) {
+			String objectId = reference.getId();
+			if (getObjects().containsKey(objectId)) {
+				return getObjects().get(objectId).getMetaDataObject();
+			}
+			return null;
+		}
+	}
+
+	@Override
+	public MetadataQueryBuilder newQueryBuilder() {
+		return new DefaultMetadataQueryBuilder();
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java
new file mode 100755
index 0000000..d112720
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/InvalidReference.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Helper class to handle "invalid" references. 
+ * 
+ * Invalid references are typically returned by the metadata store implementation to indicate that a reference (or a reference list) was not provided.
+ * This could be the case, e.g., for performance reasons when finding a reference might be time consuming. 
+ * In such a case the application should explicitly use the MetadataQueryBuilder to get to the reference (list).
+ * 
+ * Clients should check any MetaDataObjectReference and List<MetaDataObjectReference>
+ * in retrieved MetaDataObjects with the {@link #isInvalidRef} and {@link #isInvalidRefList} methods.
+ * 
+ * 
+ */
+public class InvalidReference  {
+	
+	public static final String INVALID_METADATAOBJECT_REFERENCE_ID = "INVALID_METADATAOBJECT_REFERENCE_ID";
+	public static final String INVALID_METADATAOBJECT_REFERENCE_LIST_ID = "INVALID_METADATAOBJECT_REFERENCE_LIST_ID";
+	
+	/**
+	 * use this method to indicate that a reference is invalid.
+	 */
+	public static MetaDataObjectReference createInvalidReference(String repositoryId) {
+		MetaDataObjectReference invalidRef = new MetaDataObjectReference();
+		invalidRef.setRepositoryId(repositoryId);
+		invalidRef.setId(INVALID_METADATAOBJECT_REFERENCE_ID);
+		return invalidRef;
+	}
+	
+	public static boolean isInvalidRef(MetaDataObjectReference ref) {
+		if (ref == null) {
+			return false;
+		}
+		return INVALID_METADATAOBJECT_REFERENCE_ID.equals(ref.getId());
+	}
+	
+	
+	/**
+	 * use this method to indicate that a list of references is invalid.
+	 */
+	public static List<MetaDataObjectReference> createInvalidReferenceList(String repositoryId) {
+		List<MetaDataObjectReference> invalidRefList = new ArrayList<>();
+		MetaDataObjectReference invalidRefMarker = new MetaDataObjectReference();
+		invalidRefMarker.setRepositoryId(repositoryId);
+		invalidRefMarker.setId(INVALID_METADATAOBJECT_REFERENCE_LIST_ID);
+		invalidRefList.add(invalidRefMarker);
+		return invalidRefList;
+	}
+	
+	public static boolean isInvalidRefList(List<MetaDataObjectReference> refList) {
+		if ((refList == null) || (refList.size() != 1)) {
+			return false;
+		}
+		MetaDataObjectReference ref = refList.get(0);
+		if (ref == null) {
+			return false;
+		}
+		return INVALID_METADATAOBJECT_REFERENCE_LIST_ID.equals(ref.getId());
+	}
+
+}
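
A defensive client therefore guards every retrieved reference list before resolving it. A sketch, where refs is a reference list taken from a retrieved metadata object and mds is the owning MetadataStore (both assumed; the fallback query shown is a simplification):

    if (InvalidReference.isInvalidRefList(refs)) {
    	// The store did not materialize the list; fall back to an explicit query.
    	refs = mds.search(mds.newQueryBuilder().objectType("Column").build());
    }
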
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java
new file mode 100755
index 0000000..de61568
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetaDataObjectReference.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * This class describes the location of a MetadataObject
+ *
+ */
+@ApiModel(description="Reference to a metadata object.")
+public class MetaDataObjectReference {
+	@ApiModelProperty(value="Unique id of the object", required=true)
+	private String id;
+
+	@ApiModelProperty(value="Id of the metadata repository where the object is registered", required=true)
+	private String repositoryId;
+
+	@ApiModelProperty(value="URL of the object in the metadata repository", required=true)
+	private String url;
+
+	@JsonIgnore
+	private ReferenceCache cache;
+
+	public String getId() {
+		return id;
+	}
+
+	public void setId(String id) {
+		this.id = id;
+	}
+
+	@Override
+	public boolean equals(Object other) {
+		if (other == null) {
+			return false;
+		}
+		if (!(other instanceof MetaDataObjectReference)) {
+			return false;
+		}
+		MetaDataObjectReference otherMDO = (MetaDataObjectReference) other;
+		if (!this.id.equals(otherMDO.id)) {
+			return false;
+		}
+		if (this.repositoryId == null) {
+			return otherMDO.repositoryId == null;
+		}
+		return this.repositoryId.equals(otherMDO.repositoryId);
+	}
+
+	@Override
+	public int hashCode() {
+		int result = 0;
+		if (this.repositoryId != null) {
+			result = repositoryId.hashCode();
+		}
+		return result + this.id.hashCode();
+	}
+
+	@Override
+	public String toString() {
+		return this.repositoryId + "|||" + this.id;
+	}
+
+	public String getRepositoryId() {
+		return repositoryId;
+	}
+
+	public void setRepositoryId(String repositoryId) {
+		this.repositoryId = repositoryId;
+	}
+
+	public String getUrl() {
+		return url;
+	}
+
+	public void setUrl(String url) {
+		this.url = url;
+	}
+
+	public ReferenceCache getCache() {
+		return cache;
+	}
+
+	public void setCache(ReferenceCache cache) {
+		this.cache = cache;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java
new file mode 100755
index 0000000..643f203
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataQueryBuilder.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Abstract base class for a builder that can be used to create metadata queries.
+ * It uses the Java builder pattern.
+ * 
+ * There are two types of methods:
+ * 1. Chainable methods that can be used to do simple filtering, e.g.,
+ *       {@code String query = queryBuilder.objectType("DataSet").simpleCondition("name", COMPARATOR.EQUALS, "waldo").build();}
+ * 2. Predefined queries that are not chainable. These are very specific queries that currently cannot be built with the chainable methods, e.g.,
+ *       {@code String query = queryBuilder.connectionsForDataSet(dataSetId).build();}
+ * 
+ * When subclassing, note that the methods set the appropriate protected fields to null to indicate that the query was "overwritten". 
+ * 
+ * @See {@link MetadataStore}
+ */
+public abstract class MetadataQueryBuilder {
+
+	public static enum COMPARATOR {
+		EQUALS, NOT_EQUALS
+	};
+
+	protected static class Condition {
+	};
+
+	protected static class SimpleCondition extends Condition {
+		public SimpleCondition(String attributeName, COMPARATOR comparator, Object value) {
+			super();
+			this.attributeName = attributeName;
+			this.comparator = comparator;
+			this.value = value;
+		}
+
+		private String attributeName;
+		private COMPARATOR comparator;
+		private Object value;
+
+		public String getAttributeName() {
+			return attributeName;
+		}
+
+		public COMPARATOR getComparator() {
+			return comparator;
+		}
+
+		public Object getValue() {
+			return value;
+		}
+
+	}
+
+	protected String objectType;
+	protected List<Condition> conditions;
+
+	public abstract String build();
+
+	/**
+	 * Set the type of object to be queried. Names are the ones of the common model (e.g. Table, Column, etc.)
+	 */
+	public MetadataQueryBuilder objectType(String objectTypeName) {
+		this.objectType = objectTypeName;
+		return this;
+	}
+
+	/**
+	 * Add a simple condition to the query. All conditions are "ANDed".
+	 */
+	public MetadataQueryBuilder simpleCondition(String attributeName, COMPARATOR comparator, Object value) {
+		if (conditions == null) {
+			conditions = new ArrayList<>();
+		}
+		conditions.add(new SimpleCondition(attributeName, comparator, value));
+		return this;
+	}
+
+}
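
Since each store hands out the builder matching its own query syntax via newQueryBuilder(), client code stays store-agnostic. A usage sketch based on the example from the class comment (mds is an assumed MetadataStore handle):

    String query = mds.newQueryBuilder()
    		.objectType("DataSet")
    		.simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "waldo")
    		.build();
    List<MetaDataObjectReference> hits = mds.search(query);
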
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java
new file mode 100755
index 0000000..7a50ced
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStore.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+
+/**
+ * Interface to be implemented by a metadata store in order to be used with ODF.
+ * 
+ * In addition to this interface, each ODF metadata store must support the ODF base types defined by the
+ * {@link WritableMetadataStoreUtils#getBaseTypes} method.
+ *
+ */
+public interface MetadataStore extends ExternalStore {
+
+	/**
+	 * Retrieve information required to access the actual data behind an information asset, e.g. the connection info
+	 * to retrieve the data in a JDBC table.
+	 *  
+	 * @param informationAsset Given information asset
+	 * @return Connection information required for data access
+	 */
+	ConnectionInfo getConnectionInfo(MetaDataObject informationAsset);
+
+	/**
+	 * Retrieve a metadata object by its metadata object reference.
+	 *  
+	 * @param reference Metadata object reference
+	 * @return Metadata object
+	 */
+	MetaDataObject retrieve(MetaDataObjectReference reference);
+	
+	/**
+	 * Perform a search against the metadata store. The query should be generated using the {@link MetadataQueryBuilder}
+	 * returned by the {@link #newQueryBuilder()} method.
+	 *
+	 * @param query Query string
+	 * @return List of references to metadata objects found by the query
+	 */
+	List<MetaDataObjectReference> search(String query);
+	
+	/**
+	 * Populates the metadata store with example datasets. This method is optional, however in order to support the ODF
+	 * integration tests, this method must create the objects returned by the {@link WritableMetadataStoreUtils#getSampleDataObjects}
+	 * method.
+	 * 
+	 */
+	void createSampleData();
+
+	/**
+	 * Deletes all data from this repository. This method is optional, however it must be implemented in order to support the ODF
+	 * integration tests.
+	 * 
+	 */
+	void resetAllData();
+	
+	MetadataQueryBuilder newQueryBuilder();
+	
+	/**
+	 * Return an implementation of the {@link AnnotationPropagator} interface that propagates ODF annotations into the metadata store.
+	 * The method may return null if the metadata store does not support annotation propagation.
+	 * 
+	 * @return the AnnotationPropagator for this MetadataStore
+	 */
+	AnnotationPropagator getAnnotationPropagator();
+
+	/**
+	 * Retrieve references of a specific type from an object stored in the metadata store.
+	 * A list of available reference types can be retrieved with the {@link #getReferenceTypes() getReferenceTypes} method.
+	 *  
+	 * @param attributeName Name of the reference
+	 * @param metaDataObject Given metadata object to retrieve the references from
+	 * @return List of objects referenced by the given metadata object
+	 */
+	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject);
+
+	/**
+	 * Return the list of available reference types supported by the {@link #getReferences(String, MetaDataObject) getReferences} method of the metadata store.
+	 * The list indicates which reference types are added to the internal metadata cache when a discovery service is called. That way, they will be available
+	 * to the service at runtime even if the service has no access to the metadata store.
+	 *  
+	 * @return List of supported reference types 
+	 */
+	public List<String> getReferenceTypes();
+
+	/**
+	 * Retrieve the parent object of a given object stored in the metadata store.
+	 *  
+	 * @param metaDataObject Given metadata object
+	 * @return Parent object of the metadata object
+	 */
+	public MetaDataObject getParent(MetaDataObject metaDataObject);
+
+	/**
+	 * Retrieve the child objects of a given object stored in the metadata store.
+	 *  
+	 * @param metaDataObject Given metadata object
+	 * @return List of child objects referenced by the given metadata object
+	 */
+	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject);
+
+	/**
+	 * Retrieve data file objects referenced by a data file folder object.
+	 *  
+	 * @param folder Given data file folder
+	 * @return List of data file objects
+	 */
+	public List<DataFile> getDataFiles(DataFileFolder folder);
+
+	/**
+	 * Retrieve data file folder objects referenced by a data file folder object.
+	 *  
+	 * @param folder Given data file folder
+	 * @return List of data file folder objects
+	 */
+	public List<DataFileFolder> getDataFileFolders(DataFileFolder folder);
+
+	/**
+	 * Retrieve schema objects referenced by a database object.
+	 *  
+	 * @param database Given database
+	 * @return List of schema objects
+	 */
+	public List<Schema> getSchemas(Database database);
+
+	/**
+	 * Retrieve table objects referenced by a schema object.
+	 *  
+	 * @param schema Given schema
+	 * @return List of table objects
+	 */
+	public List<Table> getTables(Schema schema);
+
+	/**
+	 * Retrieve column objects referenced by a relational data set (e.g. a table).
+	 *  
+	 * @param relationalDataSet Given relational data set
+	 * @return List of column objects
+	 */
+	public List<Column> getColumns(RelationalDataSet relationalDataSet);
+
+	/**
+	 * Retrieve connection objects referenced by a data store object.
+	 *  
+	 * @param dataStore Given data store
+	 * @return List of connection objects
+	 */
+	public List<Connection> getConnections(DataStore dataStore);
+
+}
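
The typed accessors make it straightforward to walk the containment hierarchy. A sketch, assuming mds and db are an available MetadataStore and a retrieved Database (and that Table is a RelationalDataSet, as the getColumns signature suggests):

    for (Schema schema : mds.getSchemas(db)) {
    	for (Table table : mds.getTables(schema)) {
    		for (Column column : mds.getColumns(table)) {
    			System.out.println(column.getReference().getId());
    		}
    	}
    }
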
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java
new file mode 100755
index 0000000..9ad68bf
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreBase.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+
+/**
+ * Common base that may be used for any metadata store implementation.
+ * 
+ * 
+ */
+public abstract class MetadataStoreBase implements MetadataStore {
+
+	public static final String ODF_CONNECTIONS_REFERENCE = "CONNECTIONS";
+	public static final String ODF_COLUMNS_REFERENCE = "COLUMNS";
+	public static final String ODF_DATAFILEFOLDERS_REFERENCE = "DATAFILEFOLDERS";
+	public static final String ODF_DATAFILES_REFERENCE = "DATAFILES";
+	public static final String ODF_SCHEMAS_REFERENCE = "SCHEMAS";
+	public static final String ODF_TABLES_REFERENCE = "TABLES";
+
+	protected abstract <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type);
+
+	@Override
+	public List<String> getReferenceTypes() {
+		List<String> result = new ArrayList<String>();
+		result.add(ODF_CONNECTIONS_REFERENCE);
+		result.add(ODF_COLUMNS_REFERENCE);
+		result.add(ODF_DATAFILEFOLDERS_REFERENCE);
+		result.add(ODF_DATAFILES_REFERENCE);
+		result.add(ODF_SCHEMAS_REFERENCE);
+		result.add(ODF_TABLES_REFERENCE);
+		return result;
+	}
+
+	@Override
+	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject) {
+		return getReferences(attributeName, metaDataObject, MetaDataObject.class);
+	}
+
+	@Override
+	public List<DataFile> getDataFiles(DataFileFolder folder) {
+		return getReferences(ODF_DATAFILES_REFERENCE, folder, DataFile.class);
+	}
+
+	@Override
+	public List<DataFileFolder> getDataFileFolders(DataFileFolder folder) {
+		return getReferences(ODF_DATAFILEFOLDERS_REFERENCE, folder, DataFileFolder.class);
+	}
+
+	@Override
+	public List<Schema> getSchemas(Database database) {
+		return getReferences(ODF_SCHEMAS_REFERENCE, database, Schema.class);
+	}
+
+	@Override
+	public List<Table> getTables(Schema schema) {
+		return getReferences(ODF_TABLES_REFERENCE, schema, Table.class);
+	}
+
+	@Override
+	public List<Column> getColumns(RelationalDataSet relationalDataSet) {
+		return getReferences(ODF_COLUMNS_REFERENCE, relationalDataSet, Column.class);
+	}
+
+	@Override
+	public List<Connection> getConnections(DataStore dataStore) {
+		return getReferences(ODF_CONNECTIONS_REFERENCE, dataStore, Connection.class);
+	}
+
+	@Override
+	public ConnectionStatus testConnection() {
+		return ConnectionStatus.OK;
+	}
+
+	@Override
+	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject) {
+		List<MetaDataObject> result = new ArrayList<MetaDataObject>();
+		for (String referenceType : getReferenceTypes()) {
+			for (MetaDataObject ref : getReferences(referenceType, metaDataObject, MetaDataObject.class)) {
+				if (!result.contains(ref)) {
+					result.add(ref);
+				}
+			}
+		}
+		return result;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java
new file mode 100755
index 0000000..7c84a61
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/MetadataStoreException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+public class MetadataStoreException extends RuntimeException {
+
+	private static final long serialVersionUID = -8509622412001869582L;
+
+	public MetadataStoreException() {
+		super();
+	}
+
+	public MetadataStoreException(String message, Throwable cause) {
+		super(message, cause);
+	}
+
+	public MetadataStoreException(String message) {
+		super(message);
+	}
+
+	public MetadataStoreException(Throwable cause) {
+		super(cause);
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java
new file mode 100755
index 0000000..5601614
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RESTMetadataStoreHelper.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.connectivity.RESTClientManager;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+
+public class RESTMetadataStoreHelper {
+
+	static Logger logger = Logger.getLogger(RESTMetadataStoreHelper.class.getName());
+
+	/**
+	 * Return a ConnectionStatus object for a static URI, i.e. the metadata store is
+	 * considered unreachable if the URI itself cannot be reached.
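+	 * <p>A sketch of typical use (URL and credentials are placeholders):
+	 * <pre>{@code
+	 * RESTClientManager client = new RESTClientManager(new URI("https://localhost:58081/odf"), "user", "password");
+	 * MetadataStore.ConnectionStatus status =
+	 *     RESTMetadataStoreHelper.testConnectionForStaticURL(client, "https://localhost:58081/odf");
+	 * }</pre>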
+	 */
+	public static MetadataStore.ConnectionStatus testConnectionForStaticURL(RESTClientManager client, String uri) {
+		try {
+			Response resp = client.getAuthenticatedExecutor().execute(Request.Get(uri));
+			HttpResponse httpResponse = resp.returnResponse();
+			switch (httpResponse.getStatusLine().getStatusCode()) {
+			case HttpStatus.SC_NOT_FOUND:
+				return MetadataStore.ConnectionStatus.UNREACHABLE;
+			case HttpStatus.SC_OK:
+				return MetadataStore.ConnectionStatus.OK;
+			default:
+				break;
+			}
+		} catch (Exception e) {
+			logger.log(Level.INFO, "Connection failed", e);
+		}
+		return MetadataStore.ConnectionStatus.UNKOWN_ERROR;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java
new file mode 100755
index 0000000..d48b6fe
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/ReferenceCache.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+
+/**
+ * This class is used to cache the materialized version of a metadata reference in order to reduce the number of retrievals required.
+ *
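+ * <p>A short usage sketch (the column object is a placeholder):
+ * <pre>{@code
+ * ReferenceCache cache = new ReferenceCache();
+ * cache.setColumn(column);           // materialize the reference once
+ * Column cached = cache.getColumn(); // later accesses avoid another retrieval
+ * }</pre>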
+ */
+public class ReferenceCache {
+
+	private Annotation annotation;
+	private Column oMColumn;
+	private DataSet oMDataSet;
+
+	public Column getColumn() {
+		return oMColumn;
+	}
+
+	public void setColumn(Column oMColumn) {
+		this.oMColumn = oMColumn;
+	}
+
+	public DataSet getDataSet() {
+		return oMDataSet;
+	}
+
+	public void setDataSet(DataSet oMDataSet) {
+		this.oMDataSet = oMDataSet;
+	}
+
+	public Annotation getAnnotation() {
+		return annotation;
+	}
+
+	public void setAnnotation(Annotation annotation) {
+		this.annotation = annotation;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java
new file mode 100755
index 0000000..3567c1d
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/RemoteMetadataStore.java
@@ -0,0 +1,385 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLEncoder;
+import java.security.GeneralSecurityException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.wink.json4j.JSON;
+import org.apache.wink.json4j.JSONArray;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.connectivity.RESTClientManager;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.json.JSONUtils;
+
+// TODO properly escape all URLs when constructed as string concatenation
+
+/**
+ * 
+ * A MetadataStore implementation that accesses metadata remotely through the REST API of an ODF instance.
+ *
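+ * <p>A hedged usage sketch (URL, credentials, and the object reference are placeholders):
+ * <pre>{@code
+ * RemoteMetadataStore mds = new RemoteMetadataStore("https://localhost:58081/odf", "user", "password", true);
+ * if (mds.testConnection() == MetadataStore.ConnectionStatus.OK) {
+ *     MetaDataObject object = mds.retrieve(someReference);
+ * }
+ * }</pre>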
+ */
+public class RemoteMetadataStore extends MetadataStoreBase implements MetadataStore {
+	private Logger logger = Logger.getLogger(RemoteMetadataStore.class.getName());
+
+	private String odfUrl;
+	private String odfUser;
+
+	private Properties mdsProps = null;
+	
+	// if this is true, null repository Ids are ok for all MetaDataObjectReference objects
+	private boolean isDefaultStore = true;
+
+	private RESTClientManager restClient;
+
+	static String ODF_API_INFIX = "/odf/api/v1";
+
+	private void constructThis(String odfUrl, String odfUser, String odfPassword, boolean isDefaultStore) throws URISyntaxException {
+		this.odfUrl = odfUrl;
+		this.odfUser = odfUser;
+		this.restClient = new RESTClientManager(new URI(odfUrl), odfUser, odfPassword);
+		this.isDefaultStore = isDefaultStore;
+	}
+
+	public RemoteMetadataStore(String odfUrl, String odfUser, String odfPassword, boolean isDefaultStore) throws URISyntaxException, MetadataStoreException {
+		constructThis(odfUrl, odfUser, odfPassword, isDefaultStore);
+	}
+
+	/**
+	 * check if the reference belongs to this repository. Throw exception if not.
+	 */
+	void checkReference(MetaDataObjectReference reference) {
+		if (reference == null) {
+			throw new MetadataStoreException("Reference cannot be null");
+		}
+		if (reference.getRepositoryId() == null) {
+			if (!isDefaultStore) {
+				throw new MetadataStoreException("Repository ID is not set on the reference.");
+			}
+		} else {
+			if (!reference.getRepositoryId().equals(this.getRepositoryId())) {
+				throw new MetadataStoreException(MessageFormat.format("Repository ID ''{0}'' of reference does not match the one of this repository ''{1}''",
+						new Object[] { reference.getRepositoryId(), getRepositoryId() }));
+			}
+		}
+	}
+	
+	/**
+	 * check if the ODF metadata API can be reached. Throw exception if not.
+	 */
+	private void checkConnectionToMetadataAPI() {
+		MetadataStore.ConnectionStatus connStatus = testConnection();
+		if (connStatus.equals(MetadataStore.ConnectionStatus.UNREACHABLE)) {
+			throw new MetadataStoreException("Internal API for metadata store cannot be reached. Make sure that the discovery service has access to the following URL: " + odfUrl);
+		} else if (connStatus.equals(MetadataStore.ConnectionStatus.AUTHORIZATION_FAILED)) {
+			String messageDetail = "";
+			if (this.odfUser.isEmpty()) {
+				messageDetail = " Make sure to connect to the discovery service securely through https.";
+				//Note that ODF user id and password are only provided if the connection to the service is secure
+			}
+			throw new MetadataStoreException("Authorization failure when accessing the API of the internal metadata store." + messageDetail);
+		}
+	}
+
+	@Override
+	public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
+		throw new UnsupportedOperationException("This method is not available in the remote implementation of the Metadata store.");
+	}
+
+	@Override
+	public MetaDataObject retrieve(MetaDataObjectReference reference) {
+		checkReference(reference);
+		checkConnectionToMetadataAPI();
+		try {
+			String resource = odfUrl + ODF_API_INFIX + "/metadata/asset/" + URLEncoder.encode(JSONUtils.toJSON(reference), "UTF-8");
+			logger.log(Level.FINEST, "Object reference to be retrieved ''{0}''.", reference.toString());
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code == HttpStatus.SC_NOT_FOUND) {
+				return null;
+			}
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Retrieval of object ''{0}'' failed: HTTP request status: ''{1}'', {2}",
+						new Object[] { JSONUtils.toJSON(reference), statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			} else {
+				JSONObject mdo = (JSONObject) JSON.parse(httpResponse.getEntity().getContent());
+				mdo.remove("annotations");
+				MetaDataObject result = JSONUtils.fromJSON(mdo.write(), MetaDataObject.class);
+				if (result.getReference() == null) {
+					// An empty JSON document indicates that the result should be null.
+					result = null;
+				}
+				logger.log(Level.FINEST, "Retrieved metadata object: ''{0}''.", result);
+				return result;
+			}
+		} catch (GeneralSecurityException | IOException | JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+	
+	@Override
+	public Properties getProperties() {
+		if (this.mdsProps != null) {
+			return this.mdsProps; 
+		} else {
+			checkConnectionToMetadataAPI();
+			try {
+				String resource = odfUrl + ODF_API_INFIX + "/metadata";
+				Executor executor = this.restClient.getAuthenticatedExecutor();
+				HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
+				StatusLine statusLine = httpResponse.getStatusLine();
+				int code = statusLine.getStatusCode();
+				InputStream is = httpResponse.getEntity().getContent();
+				String response = JSONUtils.getInputStreamAsString(is, "UTF-8");
+				is.close();
+				if (code != HttpStatus.SC_OK) {
+					String msg = MessageFormat.format("Retrieval of metadata store properties at ''{3}'' failed: HTTP request status: ''{0}'', {1}, details: {2}",
+							new Object[] { code, statusLine.getReasonPhrase(), response,  resource});
+					throw new MetadataStoreException(msg);
+				} else {
+					this.mdsProps = new Properties();
+					JSONObject jo = new JSONObject(response);
+					for (Object key : jo.keySet()) {
+						this.mdsProps.put((String) key, (String) jo.get(key));
+					}
+					return this.mdsProps;
+				}
+			} catch (GeneralSecurityException | IOException | JSONException exc) {
+				logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+				throw new MetadataStoreException(exc);
+			}			
+		}
+	}
+
+	@Override
+	public List<MetaDataObjectReference> search(String query) {
+		checkConnectionToMetadataAPI();
+		try {
+			logger.log(Level.FINE, "Metadata search term: ''{0}''.", query);
+			URIBuilder uri = new URIBuilder(odfUrl + ODF_API_INFIX + "/metadata/search")
+					.addParameter("query", query)
+					.addParameter("resulttype", "references");
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Get(uri.build())).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				throw new MetadataStoreException("Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
+			}
+			InputStream is = httpResponse.getEntity().getContent();
+			JSONArray objReferencesJson = new JSONArray(is);
+			is.close();
+			logger.log(Level.FINEST, "Metadata search response: ''{0}''.", objReferencesJson.write());
+			List<MetaDataObjectReference> resultMDORs = new ArrayList<>();
+			for (Object ref : objReferencesJson) {
+				MetaDataObjectReference objRef = JSONUtils.fromJSON(((JSONObject) ref).write(), MetaDataObjectReference.class);
+				resultMDORs.add(objRef);
+			}			
+			return resultMDORs;
+		} catch (GeneralSecurityException | IOException | URISyntaxException | JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store.", exc);
+			throw new MetadataStoreException(exc);
+		}
+
+	}
+
+	@Override
+	public String getRepositoryId() {
+		Properties mdsProps = this.getProperties();
+		if (mdsProps.getProperty(STORE_PROPERTY_ID) != null) {
+			return mdsProps.getProperty(STORE_PROPERTY_ID);
+		} else {
+			throw new MetadataStoreException("Property " + STORE_PROPERTY_ID + " is missing from metadata store properties ''" + mdsProps.toString() + "''.");
+		}
+	}
+
+	@Override
+	public MetadataStore.ConnectionStatus testConnection() {
+		return RESTMetadataStoreHelper.testConnectionForStaticURL(restClient, odfUrl);
+	}
+
+	@Override
+	public void createSampleData() {
+		checkConnectionToMetadataAPI();
+		try {
+			String resource = odfUrl + ODF_API_INFIX + "/metadata/sampledata";
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Create sample data failed: HTTP request status: ''{0}'', {1}",
+						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			}
+		} catch (GeneralSecurityException | IOException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	@Override
+	public void resetAllData() {
+		checkConnectionToMetadataAPI();
+		try {
+			String resource = odfUrl + ODF_API_INFIX + "/metadata/resetalldata";
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Post(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Reset all data failed: HTTP request status: ''{0}'', {1}",
+						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			}
+		} catch (GeneralSecurityException | IOException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	@Override
+	public MetadataQueryBuilder newQueryBuilder() {
+		String repoType = getProperties().getProperty(STORE_PROPERTY_TYPE);
+		if ("atlas".equals(repoType)) {
+			return new AtlasMetadataQueryBuilder();
+		} else if ("default".equals(repoType)) {
+			return new DefaultMetadataQueryBuilder();
+		}
+		throw new RuntimeException(MessageFormat.format("No query builder exists for the repository type ''{0}''", repoType));
+	}
+
+	@Override
+	public AnnotationPropagator getAnnotationPropagator() {
+		throw new UnsupportedOperationException("This method is not available in the remote implementation of the Metadata store.");
+	}
+
+	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type){
+		String objectId = metaDataObject.getReference().getId();
+		checkConnectionToMetadataAPI();
+		try {
+			String resource = odfUrl + ODF_API_INFIX + "/metadata/asset/"
+				+ URLEncoder.encode(JSONUtils.toJSON(metaDataObject.getReference()), "UTF-8")
+				+ "/" + URLEncoder.encode(attributeName.toLowerCase(), "UTF-8");
+			logger.log(Level.FINEST, "Retrieving references of type ''{0}'' from metadata object id ''{1}''.", new Object[] { attributeName, objectId });
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code == HttpStatus.SC_NOT_FOUND) {
+				return null;
+			}
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Retrieving references of type ''{0}'' of object id ''{1}'' failed: HTTP request status: ''{2}'', {3}",
+						new Object[] { attributeName, objectId, statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			} else {
+				InputStream is = httpResponse.getEntity().getContent();
+				JSONArray objReferencesJson = new JSONArray(is);
+				is.close();
+				logger.log(Level.FINEST, "Get references response: ''{0}''.", objReferencesJson.write());
+				List<T> referencedObjects = new ArrayList<T>();
+				for (Object ref : objReferencesJson) {
+					T obj = JSONUtils.fromJSON(((JSONObject) ref).write(), type);
+					referencedObjects.add(obj);
+				}
+				return referencedObjects;
+			}
+		} catch (GeneralSecurityException | IOException | JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	@Override
+	public List<MetaDataObject> getReferences(String attributeName, MetaDataObject metaDataObject){
+		return getReferences(attributeName, metaDataObject, MetaDataObject.class);
+	}
+
+	@Override
+	public List<String> getReferenceTypes(){
+		checkConnectionToMetadataAPI();
+		try {
+			String resource = odfUrl + ODF_API_INFIX + "/metadata/referencetypes";
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = executor.execute(Request.Get(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code == HttpStatus.SC_NOT_FOUND) {
+				return null;
+			}
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Retrieving reference type names failed: HTTP request status: ''{0}'', {1}",
+						new Object[] { statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			} else {
+				InputStream is = httpResponse.getEntity().getContent();
+				JSONArray objReferencesJson = new JSONArray(is);
+				is.close();
+				logger.log(Level.FINEST, "Get reference types response: ''{0}''.", objReferencesJson.write());
+				List<String> referenceTypeNames = new ArrayList<String>();
+				for (Object ref : objReferencesJson) {
+					String obj = JSONUtils.fromJSON(((JSONObject) ref).write(), String.class);
+					referenceTypeNames.add(obj);
+				}			
+				return referenceTypeNames;
+			}
+		} catch (GeneralSecurityException | IOException | JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to the metadata store", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	@Override
+	public MetaDataObject getParent(MetaDataObject metaDataObject){
+		List<MetaDataObject> parentList = getReferences(InternalMetaDataUtils.ODF_PARENT_REFERENCE, metaDataObject, MetaDataObject.class);
+		if (parentList.size() == 1) {
+			return parentList.get(0);
+		} else if (parentList.size() == 0) {
+			return null;
+		}
+		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more than one parent object.", metaDataObject.getReference().getId());
+		throw new MetadataStoreException(errorMessage);
+	}
+
+	@Override
+	public List<MetaDataObject> getChildren(MetaDataObject metaDataObject){
+		return getReferences(InternalMetaDataUtils.ODF_CHILDREN_REFERENCE, metaDataObject, MetaDataObject.class);
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java
new file mode 100755
index 0000000..5de5f12
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/StoredMetaDataObject.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * Internal representation of a metadata object as used by the metadata cache.
+ * In addition to the object itself, this class holds all references of the object.
+ * 
+ * 
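+ * <p>A short sketch of how an entry might be built (the table object and reference list are placeholders):
+ * <pre>{@code
+ * StoredMetaDataObject stored = new StoredMetaDataObject(table);
+ * stored.getReferenceMap().put(MetadataStoreBase.ODF_COLUMNS_REFERENCE, columnReferences);
+ * }</pre>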
+ */
+@ApiModel(description="Internal representation of a metadata object in the metadata cache.")
+public class StoredMetaDataObject {
+	@ApiModelProperty(value="Actual cached metadata object", readOnly=false, required=true)
+	private MetaDataObject metaDataObject;
+
+	@ApiModelProperty(value="Map of all references of the cached metadata object containing one reference list for each type of reference", readOnly=false, required=true)
+	private HashMap<String, List<MetaDataObjectReference>> referenceMap;
+
+	public void setMetaDataObject(MetaDataObject metaDataObject) {
+		this.metaDataObject = metaDataObject;
+	}
+
+	public MetaDataObject getMetaDataObject() {
+		return this.metaDataObject;
+	}
+
+	public StoredMetaDataObject() {
+	}
+
+	public StoredMetaDataObject(MetaDataObject metaDataObject) {
+		this.metaDataObject = metaDataObject;
+		this.referenceMap = new HashMap<String, List<MetaDataObjectReference>>();
+	}
+
+	public void setReferencesMap(HashMap<String, List<MetaDataObjectReference>> referenceMap) {
+		this.referenceMap = referenceMap;
+	}
+
+	public HashMap<String, List<MetaDataObjectReference>> getReferenceMap() {
+		return this.referenceMap;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java
new file mode 100755
index 0000000..a6de68e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/UnknownMetaDataObject.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownMetaDataObject extends MetaDataObject {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java
new file mode 100755
index 0000000..feaef91
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImportResult.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.importer;
+
+import java.util.List;
+
+public class JDBCMetadataImportResult {
+	private String databaseName;
+	private List<String> tableNames;
+	private String dbRef;
+	
+	public JDBCMetadataImportResult(String databaseName, String dbId, List<String> tableNames) {
+		super();
+		this.databaseName = databaseName;
+		this.tableNames = tableNames;
+		this.dbRef = dbId;
+	}
+	
+	public String getDBId() {
+		return this.dbRef;
+	}
+
+	public String getDatabaseName() {
+		return databaseName;
+	}
+
+	public List<String> getTableNames() {
+		return tableNames;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java
new file mode 100755
index 0000000..2127ce6
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/JDBCMetadataImporter.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.importer;
+
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+
+/**
+ * Interface of the utility that imports metadata from JDBC data sources into the ODF metadata store.
+ * 
+ */
+public interface JDBCMetadataImporter {
+	
+	/**
+	 * Import metadata of one or multiple relational tables into the ODF metadata store, along with the corresponding
+	 * database and connection information.
+	 * 
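+	 * <p>A hedged sketch of a call (the importer instance and connection values are placeholders):
+	 * <pre>{@code
+	 * JDBCConnection connection = new JDBCConnection();
+	 * connection.setJdbcConnectionString("jdbc:db2://host:50000/SAMPLE");
+	 * connection.setUser("user");
+	 * connection.setPassword("password");
+	 * JDBCMetadataImportResult result = importer.importTables(connection, "SAMPLE", "MYSCHEMA", "%");
+	 * }</pre>
+	 *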
+	 * @param connection Connection to the JDBC data source
+	 * @param dbName Database name
+	 * @param schemaPattern Database schema name or pattern
+	 * @param tableNamePattern Table name or pattern
+	 * @return Object containing the raw results of the import operation
+	 */
+	public JDBCMetadataImportResult importTables(JDBCConnection connection, String dbName, String schemaPattern, String tableNamePattern);
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java
new file mode 100755
index 0000000..3e5cba3
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/importer/MetadataImportException.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.importer;
+
+public class MetadataImportException extends RuntimeException {
+
+	private static final long serialVersionUID = -3502239943338011231L;
+
+	public MetadataImportException() {
+		super();
+	}
+
+	public MetadataImportException(String message) {
+		super(message);
+	}
+
+	public MetadataImportException(Throwable cause) {
+		super(cause);
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java
new file mode 100755
index 0000000..b25ad8b
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Annotation.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+// JSON
+/**
+ * This class represents the result of a discovery service analysis on a single metadata object.
+ * By extending this class, new annotation types for new discovery services can be created in order to provide additional information.
+ *
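+ * <p>A minimal sketch of a custom annotation type (illustrative only):
+ * <pre>{@code
+ * public class ScoreAnnotation extends Annotation {
+ *     private double score;
+ *     public double getScore() { return score; }
+ *     public void setScore(double score) { this.score = score; }
+ * }
+ * }</pre>
+ *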
+ */
+public abstract class Annotation extends MetaDataObject {
+	
+	private String annotationType = this.getClass().getSimpleName().replace('$', '_');
+	private String analysisRun;
+	private String jsonProperties;
+	private String summary;
+
+	public String getAnnotationType() {
+		return annotationType;
+	}
+
+	public void setAnnotationType(String annotationType) {
+		this.annotationType = annotationType;
+	}
+
+	public String getJsonProperties() {
+		return jsonProperties;
+	}
+
+	public void setJsonProperties(String jsonProperties) {
+		this.jsonProperties = jsonProperties;
+	}
+
+	public String getAnalysisRun() {
+		return analysisRun;
+	}
+
+	public void setAnalysisRun(String analysisRun) {
+		this.analysisRun = analysisRun;
+	}
+
+	public String getSummary() {
+		return summary;
+	}
+
+	public void setSummary(String summary) {
+		this.summary = summary;
+	}
+	
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java
new file mode 100755
index 0000000..cbc801f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/BusinessTerm.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+public class BusinessTerm extends MetaDataObject {
+	
+	private List<String> abbreviations;
+	private String example;
+	private String usage;
+	
+	public List<String> getAbbreviations() {
+		return abbreviations;
+	}
+	public void setAbbreviations(List<String> abbreviations) {
+		this.abbreviations = abbreviations;
+	}
+	public String getExample() {
+		return example;
+	}
+	public void setExample(String example) {
+		this.example = example;
+	}
+	public String getUsage() {
+		return usage;
+	}
+	public void setUsage(String usage) {
+		this.usage = usage;
+	}
+
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java
new file mode 100755
index 0000000..5bbf731
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/CachedMetadataStore.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.InternalMetadataStoreBase;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
+import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+
+/**
+ * In-memory metadata cache to be used by discovery services that do not have access to the metadata store.
+ * The cache uses the same interface as the metadata store but does not support all of its methods.
+ * 
+ * 
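+ * <p>A hedged usage sketch (the source store and root object are placeholders):
+ * <pre>{@code
+ * MetaDataCache cache = CachedMetadataStore.retrieveMetaDataCache(sourceStore, rootObject);
+ * MetadataStore cachedStore = new CachedMetadataStore(cache);
+ * List<MetaDataObject> children = cachedStore.getChildren(rootObject);
+ * }</pre>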
+ */
+public class CachedMetadataStore extends InternalMetadataStoreBase implements MetadataStore {
+	private Logger logger = Logger.getLogger(CachedMetadataStore.class.getName());
+	private static final String METADATA_STORE_ID = "ODF_METADATA_CACHE";
+	private static final String STORE_PROPERTY_TYPE = "cache";
+	private static final String STORE_PROPERTY_DESCRIPTION = "ODF metadata cache";
+
+	protected Object accessLock = new Object();
+	private HashMap<String, StoredMetaDataObject> objectStore =  new HashMap<String, StoredMetaDataObject>();
+	private HashMap<String, ConnectionInfo> connectionInfoStore = new HashMap<String, ConnectionInfo>();
+
+	public CachedMetadataStore(MetaDataCache metaDataCache) {
+		for (StoredMetaDataObject obj : metaDataCache.getMetaDataObjects()) {
+			getObjects().put(obj.getMetaDataObject().getReference().getId(), obj);
+			logger.log(Level.FINER, "Added object with name ''{0}'' to metadata cache.", obj.getMetaDataObject().getName());
+		}
+		for (ConnectionInfo conInfo : metaDataCache.getConnectionInfoObjects()) {
+			connectionInfoStore.put(conInfo.getAssetReference().getId(), conInfo);
+			logger.log(Level.FINER, "Added connection info object for metadata object id ''{0}'' to metadata cache.", conInfo.getAssetReference().getId());
+		}
+	}
+
+	protected Object getAccessLock() {
+		return accessLock;
+	}
+
+	public static MetaDataCache retrieveMetaDataCache(MetadataStore mds, MetaDataObject metaDataObject) {
+		MetaDataCache cache = new MetaDataCache();
+		populateMetaDataCache(cache, mds, metaDataObject);
+		return cache;
+	}
+
+	/**
+	 * Internal method that recursively populates the metadata cache with all child objects of a given metadata object.
+	 * If there is a @see ConnectionInfo object available for a cached metadata object
+	 * it will be added to the cache as well.
+	 *  
+	 * @param metaDataCache Metadata cache to be populated
+	 * @param mds Metadata store to retrieve the cached objects from
+	 * @param metaDataObject Given metadata object
+	 */
+	private static void populateMetaDataCache(MetaDataCache metaDataCache, MetadataStore mds, MetaDataObject metaDataObject) {
+		// Add current object
+		StoredMetaDataObject currentObject = new StoredMetaDataObject(metaDataObject);
+		for (String referenceType : mds.getReferenceTypes()) {
+			currentObject.getReferenceMap().put(referenceType, InternalMetaDataUtils.getReferenceList(mds.getReferences(referenceType, metaDataObject)));
+		}
+		metaDataCache.getMetaDataObjects().add(currentObject);
+		ConnectionInfo connectionInfo = mds.getConnectionInfo(metaDataObject);
+
+		// Connection info must be cached as well because it cannot be retrieved dynamically if required parent objects are missing from the cache
+		if (connectionInfo != null) {
+			metaDataCache.getConnectionInfoObjects().add(connectionInfo);
+		}
+
+		// Add child objects
+		for (MetaDataObject child : mds.getChildren(metaDataObject)) {
+			populateMetaDataCache(metaDataCache, mds, child);
+		}
+	}
+
+	protected HashMap<String, StoredMetaDataObject> getObjects() {
+		return objectStore;
+	}
+
+	@Override
+	public Properties getProperties() {
+		Properties props = new Properties();
+		props.put(MetadataStore.STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_DESCRIPTION);
+		props.put(MetadataStore.STORE_PROPERTY_TYPE, STORE_PROPERTY_TYPE);
+		props.put(STORE_PROPERTY_ID, METADATA_STORE_ID);
+		return props;
+	}
+
+	@Override
+	public void resetAllData() {
+		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
+	}
+
+	@Override
+	public String getRepositoryId() {
+		return METADATA_STORE_ID;
+	}
+
+	@Override
+	public ConnectionInfo getConnectionInfo(MetaDataObject metaDataObject) {
+		return connectionInfoStore.get(metaDataObject.getReference().getId());
+	}
+
+	@Override
+	public List<MetaDataObjectReference> search(String query) {
+		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
+	}
+
+	@Override
+	public void createSampleData() {
+		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
+	}
+
+	@Override
+	public AnnotationPropagator getAnnotationPropagator() {
+		throw new UnsupportedOperationException("Method not available in this implementation of the Metadata store.");
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java
new file mode 100755
index 0000000..8db6ec1
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ClassificationAnnotation.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+public class ClassificationAnnotation extends Annotation {
+	
+	private MetaDataObjectReference classifiedObject;
+	private List<MetaDataObjectReference> classifyingObjects;
+
+	public MetaDataObjectReference getClassifiedObject() {
+		return classifiedObject;
+	}
+	public void setClassifiedObject(MetaDataObjectReference classifiedObject) {
+		this.classifiedObject = classifiedObject;
+	}
+	
+	public List<MetaDataObjectReference> getClassifyingObjects() {
+		return classifyingObjects;
+	}
+	public void setClassifyingObjects(List<MetaDataObjectReference> classifyingObjects) {
+		this.classifyingObjects = classifyingObjects;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java
new file mode 100755
index 0000000..0ae4370
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Column.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * This class represents metadata of a column in a table
+ *
+ */
+public class Column extends MetaDataObject {
+
+	private String dataType;
+
+	public String getDataType() {
+		return dataType;
+	}
+
+	public void setDataType(String dataType) {
+		this.dataType = dataType;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java
new file mode 100755
index 0000000..9a42fc2
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Connection.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+public abstract class Connection extends MetaDataObject {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java
new file mode 100755
index 0000000..4884105
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ConnectionInfo.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * 
+ * General connection info that must be extended for individual data sources.
+ *
+ */
+@ApiModel(description="Object containing the information required in order to access the data behind a specific metadata object.")
+public abstract class ConnectionInfo {
+
+	@ApiModelProperty(value="Available connections for accessing the data behind the metadata object", readOnly=true, required=true)
+	private List<Connection> connections;
+
+	@ApiModelProperty(value="Reference to the actual metadata object", readOnly=true, required=true)
+	private MetaDataObjectReference assetReference;
+
+	@ApiModelProperty(value="Java class representing the connection info object", hidden=true)
+	private String javaClass = this.getClass().getName(); // don't use JsonTypeInfo 
+
+	public List<Connection> getConnections() {
+		return this.connections;
+	}
+
+	public void setConnections(List<Connection> connections) {
+		this.connections = connections;
+	}
+
+	public MetaDataObjectReference getAssetReference() {
+		return this.assetReference;
+	}
+
+	public void setAssetReference(MetaDataObjectReference assetReference) {
+		this.assetReference = assetReference;
+	}
+
+	public String getJavaClass() {
+		return javaClass;
+	}
+
+	public void setJavaClass(String javaClass) {
+		this.javaClass = javaClass;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java
new file mode 100755
index 0000000..2a1ad7f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFile.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * This class is a metadata object representing a CSV file located at a specific URL.
+ *
+ */
+public class DataFile extends RelationalDataSet {
+	private String encoding = "UTF-8";
+	private String urlString;
+
+	public String getUrlString() {
+		return urlString;
+	}
+
+	public void setUrlString(String url) {
+		this.urlString = url;
+	}
+
+	public String getEncoding() {
+		return encoding;
+	}
+
+	public void setEncoding(String encoding) {
+		this.encoding = encoding;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java
new file mode 100755
index 0000000..5e2a132
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataFileFolder.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+public class DataFileFolder extends MetaDataObject {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java
new file mode 100755
index 0000000..0b75eae
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataSet.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import io.swagger.annotations.ApiModel;
+
+// JSON
+@ApiModel(description="Metadata object representing a generic data set.")
+public abstract class DataSet extends MetaDataObject {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java
new file mode 100755
index 0000000..f4a11bd
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/DataStore.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+/**
+ * This class represents a data store, i.e. a metadata object that references its connection objects.
+ *
+ */
+public abstract class DataStore extends MetaDataObject {
+	private List<MetaDataObjectReference> connections;
+
+	public List<MetaDataObjectReference> getConnections() {
+		return connections;
+	}
+
+	public void setConnections(List<MetaDataObjectReference> connections) {
+		this.connections = connections;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java
new file mode 100755
index 0000000..bbf7a0a
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Database.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * This class is a metadata object for a relational database
+ *
+ */
+public class Database extends DataStore {
+	private String dbType;
+
+	public String getDbType() {
+		return dbType;
+	}
+
+	public void setDbType(String dbType) {
+		this.dbType = dbType;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java
new file mode 100755
index 0000000..ed6c0ef
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Document.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * 
+ * This class represents a text document stored at a URL
+ *
+ */
+public class Document extends DataSet {
+	private String encoding = "UTF-8";
+	private String urlString;
+
+	public String getUrlString() {
+		return urlString;
+	}
+
+	public void setUrlString(String url) {
+		this.urlString = url;
+	}
+
+	public String getEncoding() {
+		return encoding;
+	}
+
+	public void setEncoding(String encoding) {
+		this.encoding = encoding;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java
new file mode 100755
index 0000000..0a96fb1
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnection.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * This class represents a JDBC connection that can be used to access the contents of a data set
+ *
+ */
+public class JDBCConnection extends Connection {
+	private String jdbcConnectionString;
+	private String user;
+	private String password;
+
+	public String getJdbcConnectionString() {
+		return jdbcConnectionString;
+	}
+
+	public void setJdbcConnectionString(String jdbcConnectionString) {
+		this.jdbcConnectionString = jdbcConnectionString;
+	}
+
+	public String getUser() {
+		return user;
+	}
+
+	public void setUser(String user) {
+		this.user = user;
+	}
+
+	public String getPassword() {
+		return password;
+	}
+
+	public void setPassword(String password) {
+		this.password = password;
+	}
+
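+	// Usage sketch (illustrative; the connection string and credentials below are made-up examples):
+	//
+	//   JDBCConnection conn = new JDBCConnection();
+	//   conn.setJdbcConnectionString("jdbc:db2://dbhost:50000/SAMPLE");
+	//   conn.setUser("db2user");
+	//   conn.setPassword("secret");
+	//   java.sql.Connection c = java.sql.DriverManager.getConnection(
+	//       conn.getJdbcConnectionString(), conn.getUser(), conn.getPassword());
+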
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java
new file mode 100755
index 0000000..131f3f9
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/JDBCConnectionInfo.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * 
+ * Provides all information for connecting to a JDBC data source
+ *
+ */
+@ApiModel(description="Object containing the information required to access a specific JDBC table.")
+public class JDBCConnectionInfo extends ConnectionInfo {
+
+	@ApiModelProperty(value="Table name", readOnly=true, required=true)
+	private String tableName;
+
+	@ApiModelProperty(value="Schema name", readOnly=true, required=true)
+	private String schemaName;
+
+	public String getTableName() {
+		return this.tableName;
+	}
+
+	public void setTableName(String tableName) {
+		this.tableName = tableName;
+	}
+
+	public String getSchemaName() {
+		return this.schemaName;
+	}
+
+	public void setSchemaName(String schemaName) {
+		this.schemaName = schemaName;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java
new file mode 100755
index 0000000..ff2f47f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataCache.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+
+import io.swagger.annotations.ApiModelProperty;
+
+public class MetaDataCache {
+
+	@ApiModelProperty(value="Cached metadata objects to be used by discovery services if access to the metadata store is not available", required=false)
+	private List<StoredMetaDataObject> metaDataObjects = new ArrayList<StoredMetaDataObject>();
+
+	@ApiModelProperty(value="Cached connection info objects to be used by discovery services if access to the metadata store is not available", required=false)
+	private List<ConnectionInfo> connectionInfoObjects = new ArrayList<ConnectionInfo>();
+
+	public List<StoredMetaDataObject> getMetaDataObjects() {
+		return metaDataObjects;
+	}
+
+	public void setMetaDataObjects(List<StoredMetaDataObject> metaDataObjects) {
+		this.metaDataObjects = metaDataObjects;
+	}
+
+	public List<ConnectionInfo> getConnectionInfoObjects() {
+		return this.connectionInfoObjects;
+	}
+
+	public void setConnectionInfoObjects(List<ConnectionInfo> connectionInfoObjects) {
+		this.connectionInfoObjects = connectionInfoObjects;
+	}
+
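+	// Usage sketch (illustrative; storedTable and jdbcConnInfo are assumed to exist already):
+	// the caller fills the cache so that a remote discovery service can still resolve objects
+	// and connections when it has no direct access to the metadata store.
+	//
+	//   MetaDataCache cache = new MetaDataCache();
+	//   cache.getMetaDataObjects().add(storedTable);         // a StoredMetaDataObject
+	//   cache.getConnectionInfoObjects().add(jdbcConnInfo);  // e.g. a JDBCConnectionInfo
+	//   // a consumer can wrap such a cache in a CachedMetadataStore, as SparkUtils does
+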
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java
new file mode 100755
index 0000000..6152d51
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/MetaDataObject.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+// JSON
+/**
+ * 
+ * A MetaDataObject is an object in a metadata store. It carries a reference that describes its location in the store, and annotations may be created on it.
+ *
+ */
+@ApiModel(description="Metadata object representing a generic data set.")
+public abstract class MetaDataObject {
+
+	@ApiModelProperty(value="Reference to the object (generated)", readOnly=true, required=true)
+	private MetaDataObjectReference reference;
+
+	@ApiModelProperty(value="Description of the object", required=false)
+	private String description;
+
+	@ApiModelProperty(value="Name of the object", required=true)
+	private String name;
+
+	@ApiModelProperty(value="Java class represeting the object", hidden=true)
+	private String javaClass = this.getClass().getName(); // don't use JsonTypeInfo 
+	
+	private String originRef;
+	
+	private List<String> replicaRefs;
+
+	public String getDescription() {
+		return description;
+	}
+
+	public void setDescription(String description) {
+		this.description = description;
+	}
+
+	public String getName() {
+		return name;
+	}
+
+	public void setName(String name) {
+		this.name = name;
+	}
+
+	public MetaDataObjectReference getReference() {
+		return reference;
+	}
+
+	public void setReference(MetaDataObjectReference reference) {
+		this.reference = reference;
+	}
+
+	public String getJavaClass() {
+		return javaClass;
+	}
+
+	public void setJavaClass(String javaClass) {
+		this.javaClass = javaClass;
+	}
+
+	public String getOriginRef() {
+		return originRef;
+	}
+
+	public void setOriginRef(String originRef) {
+		this.originRef = originRef;
+	}
+
+	public List<String> getReplicaRefs() {
+		return replicaRefs;
+	}
+
+	public void setReplicaRefs(List<String> replicaRefs) {
+		this.replicaRefs = replicaRefs;
+	}
+	
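+	// Serialization sketch (illustrative): because javaClass holds the concrete class name,
+	// a serialized Table object would look roughly like
+	//
+	//   { "reference": {...},
+	//     "name": "CUSTOMERS",
+	//     "javaClass": "org.apache.atlas.odf.api.metadata.models.Table" }
+	//
+	// which lets a deserializer pick the right subclass without Jackson's @JsonTypeInfo.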
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java
new file mode 100755
index 0000000..8e6fcca
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/ProfilingAnnotation.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+public class ProfilingAnnotation extends Annotation {
+
+	private MetaDataObjectReference profiledObject;
+
+	public MetaDataObjectReference getProfiledObject() {
+		return profiledObject;
+	}
+	public void setProfiledObject(MetaDataObjectReference profiledObject) {
+		this.profiledObject = profiledObject;
+	}
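+
+	// Usage sketch (illustrative; MyStatsAnnotation is a made-up subclass): a discovery
+	// service typically subclasses ProfilingAnnotation and points it at the object it profiled.
+	//
+	//   MyStatsAnnotation ann = new MyStatsAnnotation();   // extends ProfilingAnnotation
+	//   ann.setProfiledObject(column.getReference());
+	//   // the annotation is then returned as part of the discovery service response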
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java
new file mode 100755
index 0000000..e8656c5
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationalDataSet.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+
+/**
+ * 
+ * This class represents a metadata object that contains columns, e.g. a database table or a CSV file
+ *
+ */
+public abstract class RelationalDataSet extends DataSet {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java
new file mode 100755
index 0000000..924dadf
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/RelationshipAnnotation.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+public class RelationshipAnnotation extends Annotation {
+	
+	private List<MetaDataObjectReference> relatedObjects;
+
+	public List<MetaDataObjectReference> getRelatedObjects() {
+		return relatedObjects;
+	}
+
+	public void setRelatedObjects(List<MetaDataObjectReference> relatedObjects) {
+		this.relatedObjects = relatedObjects;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java
new file mode 100755
index 0000000..e74bda4
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Schema.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+public class Schema extends MetaDataObject {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java
new file mode 100755
index 0000000..633706d
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/Table.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+/**
+ * 
+ * This class represents a database table metadata object
+ *
+ */
+public class Table extends RelationalDataSet {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java
new file mode 100755
index 0000000..3a1a968
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnection.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownConnection extends Connection {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java
new file mode 100755
index 0000000..57da8d3
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownConnectionInfo.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownConnectionInfo extends ConnectionInfo {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java
new file mode 100755
index 0000000..948bf3f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataSet.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownDataSet extends DataSet {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java
new file mode 100755
index 0000000..d07537e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownDataStore.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownDataStore extends DataStore {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java
new file mode 100755
index 0000000..6993751
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/metadata/models/UnknownRelationalDataSet.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.metadata.models;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class UnknownRelationalDataSet extends RelationalDataSet {
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java
new file mode 100755
index 0000000..08ca741
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaConsumerConfig.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import org.apache.atlas.odf.api.settings.validation.EnumValidator;
+import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+/*
+ * This class is final, because reflection is used to access getters / setters in order to merge. This doesn't work with inherited methods
+ */
+public final class KafkaConsumerConfig {
+	/*
+	 * ############ !!!!!!!!!!!!!!!!!!! ###################
+	 * 
+	 * Because of a jackson defect, JsonProperty annotations must be on all properties AND their getters and setters!
+	 * 
+	 * https://github.com/FasterXML/jackson-module-scala/issues/197
+	 */
+
+	private String offsetsStorage;
+
+	private Long zookeeperSessionTimeoutMs;
+
+	private Long zookeeperConnectionTimeoutMs;
+
+	public String getOffsetsStorage() {
+		return offsetsStorage;
+	}
+
+	public void setOffsetsStorage(String offsetsStorage) {
+		this.offsetsStorage = offsetsStorage;
+	}
+
+	public Long getZookeeperSessionTimeoutMs() {
+		return zookeeperSessionTimeoutMs;
+	}
+
+	public void setZookeeperSessionTimeoutMs(Long zookeeperSessionTimeoutMs) {
+		this.zookeeperSessionTimeoutMs = zookeeperSessionTimeoutMs;
+	}
+
+	public Long getZookeeperConnectionTimeoutMs() {
+		return zookeeperConnectionTimeoutMs;
+	}
+
+	public void setZookeeperConnectionTimeoutMs(Long zookeeperConnectionTimeoutMs) {
+		this.zookeeperConnectionTimeoutMs = zookeeperConnectionTimeoutMs;
+	}
+
+	public void validate() throws ValidationException {
+		if (getOffsetsStorage() != null) {
+			new EnumValidator("kafka", "zookeeper").validate("KafkaConsumerConfig.offsetsStorage", this.offsetsStorage);
+		}
+		if (getZookeeperConnectionTimeoutMs() != null) {
+			new NumberPositiveValidator().validate("KafkaConsumerConfig.zookeeperConnectionTimeoutMs", this.zookeeperConnectionTimeoutMs);
+		}
+		if (getZookeeperSessionTimeoutMs() != null) {
+			new NumberPositiveValidator().validate("KafkaConsumerConfig.zookeeperSessionTimeoutMs", this.zookeeperSessionTimeoutMs);
+		}
+	}
+
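+	// Usage sketch (illustrative values): only fields that are set get validated, which
+	// keeps partially filled configs usable for merging.
+	//
+	//   KafkaConsumerConfig cfg = new KafkaConsumerConfig();
+	//   cfg.setOffsetsStorage("kafka");            // allowed values: "kafka", "zookeeper"
+	//   cfg.setZookeeperSessionTimeoutMs(6000L);   // rejected if negative
+	//   cfg.validate();                            // throws ValidationException otherwise
+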
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java
new file mode 100755
index 0000000..3c1725e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/KafkaMessagingConfiguration.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+public class KafkaMessagingConfiguration extends MessagingConfiguration {
+	private KafkaConsumerConfig kafkaConsumerConfig;
+
+	private Integer queueConsumerWaitMs;
+
+	private Integer kafkaBrokerTopicReplication;
+
+	public Integer getKafkaBrokerTopicReplication() {
+		return kafkaBrokerTopicReplication;
+	}
+
+	public void setKafkaBrokerTopicReplication(Integer kafkaBrokerTopicReplication) {
+		this.kafkaBrokerTopicReplication = kafkaBrokerTopicReplication;
+	}
+
+	public Integer getQueueConsumerWaitMs() {
+		return queueConsumerWaitMs;
+	}
+
+	public void setQueueConsumerWaitMs(Integer queueConsumerWaitMs) {
+		this.queueConsumerWaitMs = queueConsumerWaitMs;
+	}
+
+	public KafkaConsumerConfig getKafkaConsumerConfig() {
+		return kafkaConsumerConfig;
+	}
+
+	public void setKafkaConsumerConfig(KafkaConsumerConfig kafkaConsumerConfig) {
+		this.kafkaConsumerConfig = kafkaConsumerConfig;
+	}
+
+	public void validate() throws ValidationException {
+		super.validate();
+		if (this.getQueueConsumerWaitMs() != null) {
+			new NumberPositiveValidator().validate("ODFConfig.queueConsumerWaitMs", this.queueConsumerWaitMs);
+		}
+		if (this.getKafkaBrokerTopicReplication() != null) {
+			new NumberPositiveValidator().validate("ODFConfig.kafkaBrokerTopicReplication", this.kafkaBrokerTopicReplication);
+		}
+		if (this.getKafkaConsumerConfig() != null) {
+			this.kafkaConsumerConfig.validate();
+		}
+
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java
new file mode 100755
index 0000000..ba006e3
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/MessagingConfiguration.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel(description="Messaging configuration to be used for queuing requests.")
+@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "type")
+public abstract class MessagingConfiguration {
+	@ApiModelProperty(value="Time analysis requests are retained on the queue in milliseconds")
+	private Long analysisRequestRetentionMs;
+	
+	public Long getAnalysisRequestRetentionMs() {
+		return analysisRequestRetentionMs;
+	}
+
+	public void setAnalysisRequestRetentionMs(Long analysisRequestRetentionMs) {
+		this.analysisRequestRetentionMs = analysisRequestRetentionMs;
+	}
+	
+	public void validate() throws ValidationException {
+		if (this.getAnalysisRequestRetentionMs() != null) {
+			new NumberPositiveValidator().validate("ODFConfig.analysisRequestRetentionMs", this.analysisRequestRetentionMs);
+		}
+	}
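+
+	// Serialization note (sketch): @JsonTypeInfo with Id.CLASS stores the concrete subclass
+	// name in a "type" property, so a serialized Kafka configuration looks roughly like
+	//
+	//   { "type": "org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration",
+	//     "analysisRequestRetentionMs": 86400000, ... }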
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java
new file mode 100755
index 0000000..2124c54
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/ODFSettings.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import java.util.Map;
+import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
+import org.apache.atlas.odf.api.settings.validation.StringNotEmptyValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/*
+ * 
+ * This class is final, because reflection is used to access getters / setters in order to merge. This doesn't work with inherited methods
+ * Boolean properties must be of type Boolean instead of boolean in order to support null values which are required for merging later!
+ *
+ */
+@ApiModel(description="General ODF settings.")
+public final class ODFSettings {
+
+	/*
+	 * ############ !!!!!!!!!!!!!!!!!!! ####################
+	 *
+	 * Because of a jackson defect, JsonProperty annotations must be on all properties AND their getters and setters!
+	 *
+	 * https://github.com/FasterXML/jackson-module-scala/issues/197
+	 */
+
+	@ApiModelProperty(value="Polling interval for checking whether a discovery service is still running")
+	private Integer discoveryServiceWatcherWaitMs;
+
+	@ApiModelProperty(value="Unique id of the ODF instance")
+	private String instanceId;
+
+	@ApiModelProperty(value="ODF URL passed to discovery services for remote access to the metadata API")
+	private String odfUrl;
+
+	@ApiModelProperty(value="ODF user id passed to discovery services for remote access to the metadata API")
+	private String odfUser;
+
+	@ApiModelProperty(value="ODF password passed to discovery services for remote access to the metadata API")
+	private String odfPassword;
+
+	@ApiModelProperty(value = "ATLAS setting indicating if events regarding newly imported data sets should be consumed from me")
+	private Boolean consumeMessageHubEvents;
+
+	@ApiModelProperty(value = "ATLAS Messagehub VCAP_SERVICES value from Bluemix,  e.g { \"messagehub\": [{\"name\" : \"...\",\n\"credentials\": {...}]")
+	private String atlasMessagehubVcap;
+
+	@ApiModelProperty(value="Indicates whether to reuse equivalent analysis requests that may be already queued rather that running the same analysis again")
+	private Boolean reuseRequests;
+
+	@ApiModelProperty(value="Messaging configuration to be used for queuing requests")
+	private MessagingConfiguration messagingConfiguration;
+
+	@ApiModelProperty(value="If set to true, ALL registered discovery services will be automatically issued when a new data set is imported")
+	private Boolean runAnalysisOnImport;
+
+	@ApiModelProperty(value="If set to true, ALL data sets will be automatically analyzed whenever a new discovery service is registered")
+	private Boolean runNewServicesOnRegistration;
+
+	@ApiModelProperty(value="User-defined configuration options for discovery services", required=true)
+	private Map<String, Object> userDefined;
+
+	@ApiModelProperty(value="Spark clusters to be used for running discovery services", required=true)
+	private SparkConfig sparkConfig;
+
+	@ApiModelProperty(value = "Set to true to propagate the created annotations for each analysis request to the metadata store")
+	private Boolean enableAnnotationPropagation;
+
+	public Boolean getEnableAnnotationPropagation() {
+		return enableAnnotationPropagation;
+	}
+
+	public void setEnableAnnotationPropagation(Boolean enableAnnotationPropagation) {
+		this.enableAnnotationPropagation = enableAnnotationPropagation;
+	}
+
+	public Boolean isReuseRequests() {
+		return reuseRequests;
+	}
+
+	public void setReuseRequests(Boolean reuseRequests) {
+		this.reuseRequests = reuseRequests;
+	}
+
+	public String getInstanceId() {
+		return this.instanceId;
+	}
+
+	public void setInstanceId(String instanceId) {
+		this.instanceId = instanceId;
+	}
+
+	public String getOdfUrl() {
+		return this.odfUrl;
+	}
+
+	public void setOdfUrl(String odfUrl) {
+		this.odfUrl = odfUrl;
+	}
+
+	public String getOdfUser() {
+		return this.odfUser;
+	}
+
+	public void setOdfUser(String odfUser) {
+		this.odfUser = odfUser;
+	}
+
+	public String getOdfPassword() {
+		return this.odfPassword;
+	}
+
+	public void setOdfPassword(String odfPassword) {
+		this.odfPassword = odfPassword;
+	}
+
+	public Integer getDiscoveryServiceWatcherWaitMs() {
+		return discoveryServiceWatcherWaitMs;
+	}
+
+	public void setDiscoveryServiceWatcherWaitMs(Integer discoveryServiceWatcherWaitMs) {
+		this.discoveryServiceWatcherWaitMs = discoveryServiceWatcherWaitMs;
+	}
+
+	public Boolean getRunAnalysisOnImport() {
+		return runAnalysisOnImport;
+	}
+
+	public void setRunAnalysisOnImport(Boolean runAnalysisOnImport) {
+		this.runAnalysisOnImport = runAnalysisOnImport;
+	}
+
+	public Boolean getRunNewServicesOnRegistration() {
+		return runNewServicesOnRegistration;
+	}
+
+	public void setRunNewServicesOnRegistration(Boolean runNewServicesOnRegistration) {
+		this.runNewServicesOnRegistration = runNewServicesOnRegistration;
+	}
+
+	public MessagingConfiguration getMessagingConfiguration() {
+		return messagingConfiguration;
+	}
+
+	public void setMessagingConfiguration(MessagingConfiguration messagingConfiguration) {
+		this.messagingConfiguration = messagingConfiguration;
+	}
+
+	public String getAtlasMessagehubVcap() {
+		return atlasMessagehubVcap;
+	}
+
+	public void setAtlasMessagehubVcap(String atlasMessagehubVcap) {
+		this.atlasMessagehubVcap = atlasMessagehubVcap;
+	}
+
+	public Map<String, Object> getUserDefined() {
+		return userDefined;
+	}
+
+	public Boolean getConsumeMessageHubEvents() {
+		return consumeMessageHubEvents;
+	}
+
+	public void setConsumeMessageHubEvents(Boolean consumeMessageHubEvents) {
+		this.consumeMessageHubEvents = consumeMessageHubEvents;
+	}
+
+	public void setUserDefined(Map<String, Object> userDefined) {
+		this.userDefined = userDefined;
+	}
+
+	public SparkConfig getSparkConfig() {
+		return sparkConfig;
+	}
+
+	public void setSparkConfig(SparkConfig sparkConfig) {
+		this.sparkConfig = sparkConfig;
+	}
+
+	public void validate() throws ValidationException {
+		new StringNotEmptyValidator().validate("ODFConfig.instanceId", instanceId);
+
+		if (this.getDiscoveryServiceWatcherWaitMs() != null) {
+			new NumberPositiveValidator().validate("ODFConfig.discoveryServiceWatcherWaitMs", this.discoveryServiceWatcherWaitMs);
+		}
+
+		if (this.messagingConfiguration != null) {
+			this.messagingConfiguration.validate();
+		}
+	}
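+
+	// Minimal sketch (illustrative values) of a settings object that passes validate():
+	//
+	//   ODFSettings settings = new ODFSettings();
+	//   settings.setInstanceId("odf-instance-1");          // required, must not be empty
+	//   settings.setDiscoveryServiceWatcherWaitMs(2000);   // optional, rejected if negative
+	//   settings.validate();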
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java
new file mode 100755
index 0000000..9c300b9
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SettingsManager.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+/**
+*
+* External Java API for reading and updating ODF settings
+*
+*/
+public interface SettingsManager {
+
+	/**
+	 * Retrieve Kafka consumer properties
+	 * @return Current Kafka consumer properties
+	 */
+	public Properties getKafkaConsumerProperties();
+
+	/**
+	 * Retrieve Kafka producer properties
+	 * @return Current Kafka producer properties
+	 */
+	public Properties getKafkaProducerProperties();
+
+	/**
+	 * Retrieve overall ODF settings including plain passwords
+	 * @return Current ODF settings
+	 */
+	public ODFSettings getODFSettings();
+
+	/**
+	 * Retrieve overall ODF settings with hidden passwords
+	 * @return Current ODF settings
+	 */
+	public ODFSettings getODFSettingsHidePasswords();
+
+	/**
+	 * Update ODF settings
+	 * 
+	 * Passwords provided as plain text will be encrypted. If HIDDEN_PASSWORD_IDENTIFIER
+	 * is provided instead of a password, the stored password will remain unchanged.
+	 * 
+	 * @param update Updated ODF settings
+	 */
+	public void updateODFSettings(ODFSettings update) throws ValidationException;
+
+	/**
+	 * Reset ODF settings to the defaults
+	 */
+	public void resetODFSettings();
+
+	/**
+	 * Retrieve user defined ODF properties
+	 * @return Map of user defined ODF properties
+	 */
+	public Map<String, Object> getUserDefinedConfig();
+
+	/**
+	 * Update user defined ODF properties
+	 * @param update Map of user defined ODF properties
+	 * @throws ValidationException
+	 */
+	public void updateUserDefined(Map<String, Object> update) throws ValidationException;
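+
+	// Usage sketch (illustrative; how the SettingsManager instance is obtained is up to
+	// the ODF runtime, e.g. through the OpenDiscoveryFramework facade):
+	//
+	//   SettingsManager sm = ...;
+	//   ODFSettings settings = sm.getODFSettings();
+	//   settings.setReuseRequests(Boolean.TRUE);
+	//   sm.updateODFSettings(settings);   // throws ValidationException on invalid values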
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java
new file mode 100755
index 0000000..5c3694c
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/SparkConfig.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+//JSON
+@ApiModel(description = "Configuration of Spark cluster.")
+public class SparkConfig {
+
+	@ApiModelProperty(value = "Master URL of the Spark cluster", required = true)
+	private String clusterMasterUrl = null;
+
+	@ApiModelProperty(value="Custom Spark configuration options", required=false)
+	private Map<String, Object> configs = new HashMap<>();
+
+	public String getClusterMasterUrl() {
+		return this.clusterMasterUrl;
+	}
+
+	public void setClusterMasterUrl(String clusterMasterUrl) {
+		this.clusterMasterUrl = clusterMasterUrl;
+	}
+
+	@JsonAnyGetter
+	public Map<String, Object> getConfigs() {
+		return this.configs;
+	}
+
+	@JsonAnySetter
+	public void setConfig(String name, Object value) {
+		this.configs.put(name, value);
+	}
+
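+	// Serialization note (sketch): @JsonAnySetter folds unknown top-level properties into
+	// the configs map, so custom Spark options can sit next to the master URL:
+	//
+	//   { "clusterMasterUrl": "spark://master:7077",
+	//     "spark.executor.memory": "2g",
+	//     "spark.cores.max": 4 }
+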
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java
new file mode 100755
index 0000000..c6c365f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/EnumValidator.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+import java.text.MessageFormat;
+import java.util.Arrays;
+
+public class EnumValidator implements PropertyValidator {
+
+	String[] validValues = new String[0];
+
+	public EnumValidator(String... validValues) {
+		this.validValues = validValues;
+	}
+
+	@Override
+	public void validate(String property, Object value) throws ValidationException {
+		for (String valid : validValues) {
+			if (valid.equals(value)) {
+				return;
+			}
+		}
+
+		throw new ValidationException(property, MessageFormat.format("Only the following values are allowed: {0}", Arrays.toString(validValues)));
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java
new file mode 100755
index 0000000..ad2662e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ImplementationValidator.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+import java.text.MessageFormat;
+
+public class ImplementationValidator implements PropertyValidator {
+
+	public void validate(String property, Object value) throws ValidationException {
+		Class<?> implClass;
+		try {
+			implClass = this.getClass().getClassLoader().loadClass(String.valueOf(value));
+			Object o = implClass.newInstance();
+			o.toString();
+			return;
+		} catch (ClassNotFoundException e) {
+			e.printStackTrace();
+			throw new ValidationException(property, MessageFormat.format("Class {0} could not be found!", value));
+		} catch (IllegalAccessException e) {
+			e.printStackTrace();
+			throw new ValidationException(property, MessageFormat.format("Class {0} could not be accessed!", value));
+		} catch (InstantiationException e) {
+			e.printStackTrace();
+			throw new ValidationException(property, MessageFormat.format("Class {0} could not be instantiated!", value));
+		} catch (NoClassDefFoundError e) {
+			e.printStackTrace();
+			throw new ValidationException(property, MessageFormat.format("Class defintiion {0} could not be found!", value));
+		}
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java
new file mode 100755
index 0000000..3c09f07
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/NumberPositiveValidator.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+public class NumberPositiveValidator implements PropertyValidator {
+
+	public void validate(String property, Object value) throws ValidationException {
+		if (!(value instanceof Number)) {
+			throw new ValidationException("Only numbers are allowed!");
+		} else {
+			if (value instanceof Long && (long) value < 0) {
+				throw new ValidationException(property, "Only positive values are allowed!");
+			} else if (value instanceof Integer && (int) value < 0) {
+				throw new ValidationException(property, "Only positive values are allowed!");
+			} else if (value instanceof Double && (double) value < 0) {
+				throw new ValidationException(property, "Only positive values are allowed!");
+			}
+		}
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java
new file mode 100755
index 0000000..6acb902
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/PropertyValidator.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+public interface PropertyValidator {
+
+	public void validate(String property, Object value) throws ValidationException;
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java
new file mode 100755
index 0000000..c12cf59
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/StringNotEmptyValidator.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+import java.text.MessageFormat;
+
+public class StringNotEmptyValidator implements PropertyValidator {
+
+	@Override
+	public void validate(String property, Object value) throws ValidationException {
+		if (value == null || value.toString().trim().isEmpty()) {
+			throw new ValidationException(MessageFormat.format("The property {0} is required and cannot be empty", property));
+		}
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java
new file mode 100755
index 0000000..b560e9f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/settings/validation/ValidationException.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.settings.validation;
+
+import java.text.MessageFormat;
+
+public class ValidationException extends Exception {
+
+	private static final long serialVersionUID = 485240669635915916L;
+	private String property;
+	private String errorCause;
+
+	public ValidationException(String property, String errorMessage) {
+		this.errorCause = errorMessage;
+		this.property = property;
+	}
+
+	public ValidationException(String errorCause) {
+		this.errorCause = errorCause;
+	}
+
+	@Override
+	public String getMessage() {
+		if (property != null && errorCause != null) {
+			return MessageFormat.format("Error setting property {0}, {1}", property, errorCause);
+		} else if (errorCause != null) {
+			return MessageFormat.format("Error setting property value, {0}", errorCause);
+		} else {
+			return "Error setting property value.";
+		}
+	}
+
+	public String getProperty() {
+		return property;
+	}
+
+	public void setProperty(String property) {
+		this.property = property;
+	}
+
+	public String getErrorCause() {
+		return errorCause;
+	}
+
+	public void setErrorCause(String error) {
+		this.errorCause = error;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java
new file mode 100755
index 0000000..cc218fb
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryService.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.spark;
+
+import org.apache.spark.sql.SparkSession;
+
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+
+/**
+ * Interface to be implemented by generic Spark discovery services.
+ * 
+ *
+ */
+
+public interface SparkDiscoveryService extends SyncDiscoveryService {
+
+    /**
+     * Sets the Spark session to be used for running the discovery service.
+     * 
+     */
+	void setSparkSession(SparkSession spark);
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java
new file mode 100755
index 0000000..28c7831
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkDiscoveryServiceBase.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.spark;
+
+import org.apache.spark.sql.SparkSession;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
+
+public abstract class SparkDiscoveryServiceBase extends SyncDiscoveryServiceBase implements SparkDiscoveryService  {
+	protected SparkSession spark;
+	protected MetadataStore mds;
+
+	@Override
+	public void setSparkSession(SparkSession spark) {
+		this.spark = spark;
+	}
+
+	@Override
+	public void setMetadataStore(MetadataStore mds) {
+		this.mds = mds;
+	}
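+
+	// Implementation sketch (illustrative; the runAnalysis signature shown here is the
+	// assumed SyncDiscoveryService contract, which is defined elsewhere in this API):
+	//
+	//   public class MyService extends SparkDiscoveryServiceBase {
+	//       @Override
+	//       public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+	//           // use the injected 'spark' session and 'mds' metadata store here
+	//           ...
+	//       }
+	//   }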
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java
new file mode 100755
index 0000000..d443303
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkServiceExecutor.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.spark;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
+/**
+ * Internal interface to be used for processing Spark discovery services.
+ * 
+ *
+ */
+
+public interface SparkServiceExecutor {
+
+	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request);
+
+	DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer);
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java
new file mode 100755
index 0000000..50cb09f
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/spark/SparkUtils.java
@@ -0,0 +1,308 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.spark;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.RowFactory;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.DataTypes;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.StructType;
+import org.apache.spark.util.Utils;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+
+/**
+ * Provides a number of helper methods, mainly for working with Spark data frames.
+ * 
+ *
+ */
+
+public class SparkUtils {
+	static Logger logger = Logger.getLogger(SparkUtils.class.getName());
+
+    /**
+     * Creates a Spark data frame from a data set reference stored in a data set container.
+     * 
+     * @param spark Current Spark session
+     * @param dsc Data set container that keeps the reference to the input data set
+     * @param mds Metadata store used to retrieve connection details for relational data sets
+     * @return Resulting Spark data frame
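+     *
+     * Illustrative usage (variable names are examples only):
+     * <pre>
+     *   SparkSession spark = SparkSession.builder().getOrCreate();
+     *   Dataset<Row> df = SparkUtils.createDataFrame(spark, container, mds);
+     * </pre>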
+     */
+	public static Dataset<Row> createDataFrame(SparkSession spark, DataSetContainer dsc, MetadataStore mds) {
+		Dataset<Row> df = null;
+		MetaDataObject ds = dsc.getDataSet();
+		if (ds instanceof DataFile) {
+			DataFile dataFile = (DataFile) ds;
+			logger.log(Level.INFO, MessageFormat.format("Reading DataFile {0} from URL {1}.",
+					new Object[] { dataFile.getName(), dataFile.getUrlString() }));
+			df = spark.read().format("csv").option("inferSchema", "true")
+					.option("header", "true").load(dataFile.getUrlString());
+		} else if (ds instanceof Table) {
+			Table table = (Table) ds;
+			MetadataStore availableMetadataStore;
+			if (mds.testConnection() == MetadataStore.ConnectionStatus.OK) {
+				availableMetadataStore = mds;
+			} else if (dsc.getMetaDataCache() != null) {
+				availableMetadataStore = new CachedMetadataStore(dsc.getMetaDataCache()); 
+			} else {
+				throw new RuntimeException("Discovery service has no access to the metadata store and no metadata cache is available.");
+			}
+			JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) availableMetadataStore.getConnectionInfo(table);
+			List<Connection> connections = connectionInfo.getConnections();
+			if (connections == null || connections.isEmpty()) {
+				// No connection information is attached to the relational table that was passed to the discovery service.
+				// This is typically caused by the fact that the Spark discovery service cannot access the ODF metadata API in order to retrieve cached objects
+				String msg = "Spark discovery service cannot access the ODF metadata API. Make sure that the ODF REST API is accessible from the discovery service running on the Spark cluster.";
+				logger.log(Level.SEVERE, msg);
+				throw new RuntimeException(msg);
+			}
+			JDBCConnection jdbcConnection = null;
+			for (Connection connection : connections) {
+				if (connection instanceof JDBCConnection) {
+					jdbcConnection = (JDBCConnection) connection;
+					break;
+				}
+			}
+			if (jdbcConnection == null) {
+				String msg = MessageFormat.format("No JDBC connection is attached to table {0}.", table.getName());
+				logger.log(Level.SEVERE, msg);
+				throw new RuntimeException(msg);
+			}
+			String driver = null;
+			try {
+				// Open a test connection to validate the JDBC parameters; the class name of the
+				// returned connection object is recorded for diagnostic purposes only
+				java.sql.Connection testConnection = DriverManager.getConnection(jdbcConnection.getJdbcConnectionString(),
+						jdbcConnection.getUser(), jdbcConnection.getPassword());
+				driver = testConnection.getClass().getName();
+				testConnection.close();
+				logger.log(Level.INFO, MessageFormat.format("JDBC connection class name is {0}.", driver));
+			} catch (SQLException e) {
+				String msg = MessageFormat.format("Error connecting to JDBC data source {0}: ",
+						jdbcConnection.getJdbcConnectionString());
+				logger.log(Level.WARNING, msg, e);
+				throw new RuntimeException(msg + Utils.exceptionString(e));
+			}
+			String schemaName = connectionInfo.getSchemaName();
+			String url = jdbcConnection.getJdbcConnectionString() + ":currentSchema=" + schemaName + ";user="
+					+ jdbcConnection.getUser() + ";password=" + jdbcConnection.getPassword() + ";";
+			String dbtable = schemaName + "." + table.getName();
+			String msg = "Using JDBC parameters url: {0}, dbtable: {1}, connection class: {2} to connect to DB2 database.";
+			logger.log(Level.INFO, MessageFormat.format(msg,
+					new Object[] { url.replace(jdbcConnection.getPassword(), "***"), dbtable, driver }));
+			Map<String, String> options = new HashMap<String, String>();
+			options.put("url", url);
+			options.put("dbtable", dbtable);
+			options.put("driver", "com.ibm.db2.jcc.DB2Driver"); // DB2 is the only JDBC source currently supported
+			df = spark.read().format("jdbc").options(options).load();
+		}
+		return df;
+	}
+
+    /**
+     * Generates ODF annotations from a map of annotation data frames.
+     * 
+     * @param container Data set container that holds the reference to the data set to be annotated
+     * @param annotationDataFrameMap Maps each annotation type to be created to the annotation data frame that contains the actual annotation data
+     * @param mds Metadata store used to resolve the columns of the annotated data set
+     * @return Result object that contains a list of ODF annotations
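+     *
+     * Illustrative usage (variable names are examples only):
+     * <pre>
+     *   Map<String, Dataset<Row>> dataFrameMap = new HashMap<>();
+     *   dataFrameMap.put("MyProfilingAnnotation", annotationDataFrame);
+     *   DiscoveryServiceResult result = SparkUtils.createAnnotationsFromDataFrameMap(container, dataFrameMap, mds);
+     * </pre>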
+     */
+	public static DiscoveryServiceResult createAnnotationsFromDataFrameMap(DataSetContainer container, Map<String, Dataset<Row>> annotationDataFrameMap, MetadataStore mds) throws RuntimeException {
+		RelationalDataSet tab = (RelationalDataSet) container.getDataSet();
+		DiscoveryServiceResult result = new DiscoveryServiceResult();
+
+		// Map input table columns to metadata object references
+		Map<String, MetaDataObjectReference> columnReferencesByName = new HashMap<>();
+
+		List<Column> colList;
+		if (mds.testConnection() == MetadataStore.ConnectionStatus.OK) {
+			colList = mds.getColumns(tab);
+		} else if (container.getMetaDataCache() != null) {
+			CachedMetadataStore cacheReader = new CachedMetadataStore(container.getMetaDataCache());
+			colList = cacheReader.getColumns(tab);
+		} else {
+			throw new RuntimeException("Discovery service has no access to the metadata store and no metadata cache is available.");
+		}
+
+		for (Column column : colList) {
+			columnReferencesByName.put(column.getName(), column.getReference());
+		}
+
+		List<Annotation> annotations = new ArrayList<>();
+		Dataset<Row> df = null;
+		for (Map.Entry<String, Dataset<Row>> entry : annotationDataFrameMap.entrySet()) {
+			String annotationType = entry.getKey();
+			df = entry.getValue();
+			String columnToBeAnnotated = null;
+			int rowNumber = 0;
+			try {
+				List<Row> rows = df.collectAsList();
+				String[] columnNames = df.columns();
+				StructType st = df.schema();
+
+				for (rowNumber = 0; rowNumber < rows.size(); rowNumber++) {
+					if (columnNames[0].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME)) {
+						// Generate column annotations by mapping DataFrame
+						// table column values to annotation properties
+						// Column ANNOTATION_PROPERTY_COLUMN_NAME represents the
+						// column to be annotated
+						columnToBeAnnotated = rows.get(rowNumber).getString(0);
+						MetaDataObjectReference annotatedColumn = columnReferencesByName.get(columnToBeAnnotated);
+						if (annotatedColumn != null) {
+							logger.log(Level.FINE, MessageFormat.format("Annotating column {0}:", columnToBeAnnotated));
+							annotations.add((Annotation) getAnnotation(st, columnNames, annotationType, rows.get(rowNumber),
+									annotatedColumn));
+						} else {
+							logger.log(Level.FINE, "Column " + columnToBeAnnotated
+									+ " returned by the Spark service does not match any column of the input data set.");
+						}
+					} else {
+						// Creating table annotations
+						logger.log(Level.INFO,
+								MessageFormat.format(
+										"Data frame does not contain column {0}. Creating table annotations.",
+										DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME));
+						annotations.add((Annotation) getAnnotation(st, columnNames, annotationType, rows.get(rowNumber),
+								container.getDataSet().getReference()));
+					}
+				}
+			} catch (JSONException exc) {
+				String msg = MessageFormat.format(
+						"Error processing results returned by DataFrame row {0} column {1}. See ODF application log for details.",
+						new Object[] { rowNumber, columnToBeAnnotated });
+				logger.log(Level.WARNING, msg);
+				throw new RuntimeException(msg, exc);
+			}
+		}
+		result.setAnnotations(annotations);
+		return result;
+	}
+
+    /**
+     * Creates a single ODF annotation from a row of input data. 
+     * 
+     * @param st Data types of the annotation attributes
+     * @param columnNames Names of the annotation attributes
+     * @param annotationType Type of the annotation to be created
+     * @param row Input data that represents the values of the annotation attributes
+     * @param annotatedObject Reference to the metadata object to be annotated
+     * @return A single ODF annotation object
+     */
+	public static Annotation getAnnotation(StructType st, String[] columnNames, String annotationType, Row row,
+			MetaDataObjectReference annotatedObject) throws JSONException {
+		ProfilingAnnotation an = new ProfilingAnnotation();
+		an.setAnnotationType(annotationType);
+		an.setProfiledObject(annotatedObject);
+		JSONObject jsonProperties = new JSONObject();
+		for (int j = 0; j < columnNames.length; j++) {
+			if (!columnNames[j].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_PROPERTY_COLUMN_NAME)) {
+				if (columnNames[j].equals(DiscoveryServiceSparkEndpoint.ANNOTATION_SUMMARY_COLUMN_NAME)) {
+					an.setSummary(row.getString(j));
+				} else {
+					String annotationPropertyName = columnNames[j];
+					DataType dataType = st.apply(annotationPropertyName).dataType();
+					if (dataType == DataTypes.IntegerType) {
+						jsonProperties.put(annotationPropertyName, row.getInt(j));
+					} else if (dataType == DataTypes.DoubleType) {
+						jsonProperties.put(annotationPropertyName, row.getDouble(j));
+					} else if (dataType == DataTypes.BooleanType) {
+						jsonProperties.put(annotationPropertyName, row.getBoolean(j));
+					} else if (dataType == DataTypes.FloatType) {
+						jsonProperties.put(annotationPropertyName, row.getFloat(j));
+					} else if (dataType == DataTypes.LongType) {
+						jsonProperties.put(annotationPropertyName, row.getLong(j));
+					} else if (dataType == DataTypes.ShortType) {
+						jsonProperties.put(annotationPropertyName, row.getShort(j));
+					} else {
+						// Return all other data types as String
+						jsonProperties.put(annotationPropertyName, row.getString(j));
+					}
+					logger.log(Level.FINE, "Set attribute " + annotationPropertyName + " to value " + row.get(j) + ".");
+				}
+			}
+		}
+		an.setJsonProperties(jsonProperties.toString());
+		return an;
+	}
+
+    /**
+     * Transposes a Spark data frame by turning its rows into columns. The first input column is expected to
+     * contain row labels, which become the column names of the result; the values of all other input columns
+     * are read as strings and parsed into Double values (non-numeric values become null).
+     * The first column of the resulting data frame contains the column names of the input data frame and is of
+     * data type String. All other output columns are of type Double.
+     * 
+     * @param spark Current Spark session
+     * @param origDataFrame Data frame to be transposed
+     * @return Transposed data frame
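+     *
+     * Illustrative example (hypothetical values): an input frame with columns (label, a, b) and rows
+     * ("min", "1.0", "2.0"), ("max", "10.0", "20.0") is transposed into a frame with columns
+     * (label, min, max) and rows ("a", 1.0, 10.0), ("b", 2.0, 20.0).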
+     */
+	public static Dataset<Row> transposeDataFrame(SparkSession spark, Dataset<Row> origDataFrame) {
+		Dataset<Row> transposedDataFrame = null;
+		String[] origColumnNames = origDataFrame.columns();
+		int origNumberColumns = origColumnNames.length;
+		List<Row> origRows = origDataFrame.collectAsList();
+		int origNumberRows = origRows.size();
+		List<Row> transposedRows = new ArrayList<Row>();
+
+		// Loop through columns of original DataFrame
+		for (int i = 1; i < origNumberColumns; i++) {
+			Object[] transposedRow = new Object[origNumberRows + 1];
+			transposedRow[0] = origColumnNames[i];
+			// Loop through rows of original DataFrame
+			for (int j = 0; j < origNumberRows; j++) {
+				if (origRows.get(j).getString(i) == null) {
+					transposedRow[j + 1] = null;
+				} else {
+					try {
+						transposedRow[j + 1] = Double.parseDouble(origRows.get(j).getString(i));
+					} catch(NumberFormatException e) {
+					if (logger.isLoggable(Level.FINEST)) {
+							String msg = MessageFormat.format("Cannot convert DataFrame column {0} row {1} value ''{2}'' to Double.", new Object[] { i, j, origRows.get(j).getString(i) });
+							logger.log(Level.FINEST, msg);
+						}
+						// Return null for all non-numeric fields
+						transposedRow[j + 1] = null;
+					}
+				}
+			}
+			transposedRows.add(RowFactory.create(transposedRow));
+		}
+
+		// Store original column name in first column of transposed DataFrame
+		StructField[] transposedColumnNames = new StructField[origNumberRows + 1];
+		transposedColumnNames[0] = DataTypes.createStructField(origColumnNames[0], DataTypes.StringType, false);
+		for (int j = 0; j < origNumberRows; j++) {
+			transposedColumnNames[j + 1] = DataTypes.createStructField(origRows.get(j).getString(0), DataTypes.DoubleType, false);
+		}
+		StructType st = DataTypes.createStructType(transposedColumnNames);
+		transposedDataFrame = spark.createDataFrame(transposedRows, st);
+		return transposedDataFrame;
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java
new file mode 100755
index 0000000..e4fcaa3
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/api/utils/ODFLogConfig.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.api.utils;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.logging.FileHandler;
+import java.util.logging.Formatter;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.logging.SimpleFormatter;
+
+/**
+ * Class to be used for log configuration.
+ * It reads the system property odf.logspec whose value must be of the form
+ * <Level>,<FilePattern>
+ * For instance
+ *   ALL,/tmp/odf-trace.log
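+ *
+ * Illustrative usage (values are examples only):
+ * <pre>
+ *   System.setProperty("odf.logspec", "FINE,/tmp/odf-trace.log");
+ *   ODFLogConfig.run();
+ * </pre>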
+ *
+ *
+ */
+public class ODFLogConfig {
+
+	void log(String s) {
+		System.out.println(s);
+	}
+
+	public static class ODFFileHandler extends FileHandler {
+		public ODFFileHandler(String pattern) throws IOException {
+			super(pattern);
+		}
+	}
+
+	Handler createHandler(String odfLogFilePattern) {
+		Handler result = null;
+		Formatter formatter = new SimpleFormatter();
+		try {
+			// single log file without size limit
+			result = new ODFFileHandler(odfLogFilePattern);
+			result.setFormatter(formatter);
+		} catch (Exception exc) {
+			exc.printStackTrace();
+			return null;
+		}
+		return result;
+	}
+
+	private ODFLogConfig() {
+		try {
+			String logSpec = System.getProperty("odf.logspec");
+			log("ODF Logging spec read from system property odf.logspec: " + logSpec);
+			if (logSpec == null) {
+				logSpec = System.getenv("ODFLOGSPEC");
+				log("ODF Logging spec read from env var ODFLOGSPEC: " + logSpec);
+				if (logSpec == null) {
+					return;
+				}
+			}
+			int ix = logSpec.indexOf(",");
+			if (ix == -1) {
+				return;
+			}
+			String levelString = logSpec.substring(0, ix);
+
+			String odfLogFilePattern = logSpec.substring(ix + 1);
+			String msg = MessageFormat.format("Configuring ODF logging with level {0} and log file: {1}", new Object[] { levelString, odfLogFilePattern });
+			log(msg);
+
+			Handler odfHandler = createHandler(odfLogFilePattern);
+			if (odfHandler == null) {
+				return;
+			}
+			Level level = Level.parse(levelString);
+			Logger odfRootLogger = Logger.getLogger("org.apache.atlas.odf");
+
+			// remove existing handler
+			for (Handler h : odfRootLogger.getHandlers()) {
+				if (h instanceof ODFFileHandler) {
+					odfRootLogger.removeHandler(h);
+				}
+			}
+
+			odfRootLogger.setLevel(level);
+			odfHandler.setLevel(level);
+			odfRootLogger.addHandler(odfHandler);
+			log("ODF logger configured.");
+		} catch (Exception exc) {
+			exc.printStackTrace();
+		}
+	}
+
+	static Object lockObject = new Object();
+	static ODFLogConfig config = null;
+
+	public static void run() {
+		synchronized (lockObject) {
+			if (config == null) {
+				config = new ODFLogConfig();
+			}
+		}
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java
new file mode 100755
index 0000000..6ea9c97
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationDeserializer.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.json;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+
+/**
+ * The Jackson deserializer for Annotation objects
+ * 
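+ * The target class is chosen from the javaClass attribute of the JSON document, e.g. a document like
+ * <pre>
+ *   {"javaClass": "org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation", "annotationType": "MyType"}
+ * </pre>
+ * (illustrative values) is deserialized as a ProfilingAnnotation. If the named class cannot be loaded,
+ * the deserializer falls back to one of the predefined annotation types.
+ * 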
+ *
+ */
+public class AnnotationDeserializer extends StdDeserializer<Annotation> {
+
+	private static final long serialVersionUID = -3143233438847937374L;
+	
+	Logger logger = Logger.getLogger(AnnotationDeserializer.class.getName());
+	
+	public AnnotationDeserializer() {
+		super(Annotation.class);
+	}
+
+	ClassLoader getClassLoader() {
+		return this.getClass().getClassLoader();
+	}
+	
+	@Override
+	public Annotation deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
+		ObjectMapper jpom = ((ObjectMapper) jp.getCodec());
+		ObjectNode tree = jpom.readTree(jp);
+		String jsonString = tree.toString();
+		Annotation result = null;
+
+		Class<? extends Annotation> javaClass = null;
+		JsonNode javaClassNode = tree.get("javaClass");
+		if (javaClassNode == null) {
+			throw new IOException("Cannot deserialize annotation because the javaClass attribute is missing: " + jsonString);
+		}
+		JsonNode jsonPropertiesNode = tree.get("jsonProperties");
+		String javaClassName = javaClassNode.asText();
+		if (javaClassName.equals(ProfilingAnnotation.class.getName())) {
+			javaClass = ProfilingAnnotation.class;
+		}
+		else if (javaClassName.equals(ClassificationAnnotation.class.getName())) {
+			javaClass = ClassificationAnnotation.class;
+		}
+		else if (javaClassName.equals(RelationshipAnnotation.class.getName())) {
+			javaClass = RelationshipAnnotation.class;
+		}
+		else {
+			try {
+				javaClass = (Class<? extends Annotation>) this.getClassLoader().loadClass(javaClassName);
+				if (jsonPropertiesNode != null && !jsonPropertiesNode.isNull()) { // unfold jsonProperties in case of specific annotations
+					JsonNode jsonPropertiesNodeUnfolded = null;
+					if (jsonPropertiesNode.isTextual()) {
+						jsonPropertiesNodeUnfolded = jpom.readTree(jsonPropertiesNode.asText());					
+					}
+					else {
+						jsonPropertiesNodeUnfolded = jsonPropertiesNode; 
+					}
+					JsonNode newJsonPropertiesNode = (JsonNode)jp.getCodec().createObjectNode();    // initialize new jsonProperties node
+					Field classFields[] = javaClass.getDeclaredFields();
+					HashSet<String> classFieldSet = new HashSet<String>();
+					for (Field f: classFields) {
+						f.setAccessible(true);
+						String fieldName = f.getName();
+						classFieldSet.add(fieldName);
+					}
+					Iterator<Entry<String,JsonNode>> jsonPropertiesFields = jsonPropertiesNodeUnfolded.fields();
+					while (jsonPropertiesFields.hasNext()) { 
+						Entry<String,JsonNode> field = jsonPropertiesFields.next();
+						String fieldName = field.getKey();
+						if (JSONUtils.annotationFields.contains(fieldName)) {
+							throw new IOException("Name conflict: Field name in jsonProperties matches predefined field [" + fieldName + "]");
+						}
+						JsonNode fieldValue = field.getValue();
+						if (classFieldSet.contains(fieldName)) {
+							tree.set(fieldName, fieldValue);							
+						}
+						else {
+							((ObjectNode)newJsonPropertiesNode).set(fieldName, field.getValue());							
+						}
+					}
+					tree.put("jsonProperties", newJsonPropertiesNode.toString());
+				}
+			} catch (ClassNotFoundException exc) {
+				String msg = MessageFormat.format("Java class ''{0}'' could not be deserialized automatically (probably because it is not on the classpath)", javaClassName);
+				logger.warning(msg);
+				logger.log(Level.FINE, msg, exc);
+			}
+			if (javaClass == null) {
+				if (tree.get("profiledObject") != null) {   // class not found -> create as instance of corresponding 'unknown' types
+					javaClass = ProfilingAnnotation.class;
+				}
+				else if (tree.get("classifiedObject") != null) {
+					javaClass = ClassificationAnnotation.class;
+				}
+				else if (tree.get("relatedObjects") != null) {
+					javaClass = RelationshipAnnotation.class;
+				}
+				else { // malformed annotation
+					javaClass = Annotation.class;
+				}
+				if (jsonPropertiesNode == null) {
+					jsonPropertiesNode = (JsonNode)jp.getCodec().createObjectNode(); // initialize if not already present
+				}
+				Iterator<Entry<String,JsonNode>> fields = tree.fields();
+				ArrayList<String> fieldsToRemove = new ArrayList<String>();
+				try {
+					while (fields.hasNext()) {     // move all fields not present in the predefined annotation types
+						Entry<String,JsonNode> field = fields.next();   // to the string valued jsonProperties attribute
+						String fieldName = field.getKey();
+						if (!JSONUtils.annotationFields.contains(fieldName)) {
+							((ObjectNode)jsonPropertiesNode).set(fieldName, field.getValue());
+							fieldsToRemove.add(fieldName);
+						}
+					}
+					String jsonProperties = (jsonPropertiesNode.isTextual()) ? jsonPropertiesNode.textValue() : jsonPropertiesNode.toString();
+					tree.put("jsonProperties", jsonProperties); 
+					for (String fieldToRemove:fieldsToRemove) {  // remove fields not present in the predefined annotation types
+						tree.remove(fieldToRemove);
+					}
+				}
+				catch (Exception e) {
+					throw new IOException(e);
+				}
+			}
+			jsonString = tree.toString();				
+		}
+		result = jpom.readValue(jsonString, javaClass);
+		logger.log(Level.FINEST, "Annotation created. Original: {0}, deserialized annotation: {1}", new Object[]{ jsonString, JSONUtils.lazyJSONSerializer(result)});
+		return result;
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java
new file mode 100755
index 0000000..6fcc28e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/AnnotationSerializer.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.json;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.ser.std.StdSerializer;
+
+/**
+ * The Jackson serializer for Annotation objects
+ * 
+ *
+ */
+public class AnnotationSerializer extends StdSerializer<Annotation> {
+
+	public AnnotationSerializer() {
+		this(null);
+	}
+	
+	public AnnotationSerializer(Class<Annotation> t) {
+		super(t);
+	}
+	
+	Logger logger = Logger.getLogger(AnnotationSerializer.class.getName());
+
+	ClassLoader getClassLoader() {
+		return this.getClass().getClassLoader();
+	}
+	
+	// In the following, jsonProperties is either already pre-populated (because we are serializing a direct
+	// instance of ProfilingAnnotation or another predefined annotation class), or it is assembled from all
+	// attributes that are not declared by the predefined annotation classes (e.g. when serializing an instance
+	// of an annotation subclass). In the latter case the jsonProperties attribute itself is expected to be null.
+	
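+	// For example (illustrative values): an annotation subclass with a field newProp and a pre-set
+	// jsonProperties value of {"oldProp":"v1"} serializes to a JSON document whose jsonProperties
+	// attribute is the text-encoded merge "{\"oldProp\":\"v1\",\"newProp\":\"v2\"}".
+	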
+	@Override
+	public void serialize(Annotation annot, JsonGenerator jg, SerializerProvider sp) throws IOException, JsonProcessingException {
+		jg.writeStartObject();
+		Class<?> cl = annot.getClass();
+		class JSONPropField {
+			String name;
+			Object value;
+			JSONPropField(String name, Object value) {this.name = name; this.value = value;}
+		}
+		ArrayList<JSONPropField> jsonPropFields = null;
+		String jsonPropertiesValue = null;
+		while (cl != Object.class) {   // process class hierarchy up to and including MetaDataObject.class
+			Field fields[] = cl.getDeclaredFields();
+			for (Field f: fields) {
+				f.setAccessible(true);
+				String fieldName = f.getName();
+				try {
+					Object fieldValue = f.get(annot);
+					if (fieldName.equals("jsonProperties")) {
+						jsonPropertiesValue = (String)fieldValue;
+					}
+					else if (JSONUtils.annotationFields.contains(fieldName)) {
+						jg.writeFieldName(fieldName);
+						jg.writeObject(fieldValue);							
+					}
+					else {
+						if (jsonPropFields == null) jsonPropFields = new ArrayList<JSONPropField>();
+						jsonPropFields.add(new JSONPropField(fieldName, fieldValue));
+					}
+				}
+				catch (IllegalAccessException e) {
+					throw new IOException(e);
+				}
+			}
+			cl = cl.getSuperclass();
+		}
+		jg.writeFieldName("jsonProperties");
+		if (jsonPropFields != null) {
+			jg.writeStartObject();
+			if (jsonPropertiesValue != null) {
+				try {
+					JSONObject jo = new JSONObject(jsonPropertiesValue);
+					Iterator<String> it = jo.keys();
+					while (it.hasNext()) {
+						String key = it.next();
+						jg.writeFieldName(key);
+						jg.writeObject(jo.get(key));
+					}
+				}
+				catch (JSONException e) {
+					throw new IOException(e);					
+				}
+			}
+			for (JSONPropField jpf:jsonPropFields) {
+				jg.writeFieldName(jpf.name);
+				jg.writeObject(jpf.value);								
+			}
+			jg.writeEndObject();				
+		}
+		else {
+			jg.writeString(jsonPropertiesValue);
+		}
+		jg.writeEndObject();
+	}
+
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java
new file mode 100755
index 0000000..d1ae80e
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/DefaultODFDeserializer.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.json;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
+
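+/**
+ * Generic Jackson deserializer that instantiates the concrete class named in the javaClass
+ * attribute of the JSON document and falls back to the given default class if that class
+ * cannot be loaded.
+ */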
+public class DefaultODFDeserializer<T> extends StdDeserializer<T> {
+	private static final long serialVersionUID = 4895771352050172936L;
+
+	Logger logger = Logger.getLogger(DefaultODFDeserializer.class.getName());
+
+	Class<? extends T> defaultClass;
+
+	public DefaultODFDeserializer(Class<T> cl, Class<? extends T> defaultClass) {
+		super(cl);
+		this.defaultClass = defaultClass;
+	}
+
+	ClassLoader getClassLoader() {
+		return this.getClass().getClassLoader();
+	}
+
+	@Override
+	public T deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException, JsonProcessingException {
+		ObjectMapper jpom = ((ObjectMapper) jp.getCodec());
+		JsonNode tree = jpom.readTree(jp);
+		String jsonString = tree.toString();
+
+		Class<? extends T> javaClass = null;
+		String javaClassName = null;
+		try {
+			JsonNode javaClassNode = tree.get("javaClass");
+			javaClassName = javaClassNode.asText();
+			logger.log(Level.FINEST, "Trying to deserialize object of java class {0}", javaClassName);
+			javaClass = (Class<? extends T>) this.getClassLoader().loadClass(javaClassName);
+			if (javaClass != null && !javaClass.equals(this.handledType())) {
+				return jpom.readValue(jsonString, javaClass);
+			}
+		} catch (Exception exc) {
+			String msg = MessageFormat.format("Java class ''{0}'' could not be deserialized automatically (probably because it is not on the classpath)", javaClassName);
+			logger.warning(msg);
+			logger.log(Level.FINE, msg, exc);
+		}
+		return jpom.readValue(jsonString, defaultClass);
+	}
+}
diff --git a/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java
new file mode 100755
index 0000000..fe9d592
--- /dev/null
+++ b/odf/odf-api/src/main/java/org/apache/atlas/odf/json/JSONUtils.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.json;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.wink.json4j.JSONArray;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.Version;
+import com.fasterxml.jackson.databind.Module;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
+import org.apache.atlas.odf.api.metadata.UnknownMetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
+import org.apache.atlas.odf.api.metadata.models.UnknownConnection;
+import org.apache.atlas.odf.api.metadata.models.UnknownConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataStore;
+
+public class JSONUtils {
+	
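+	// Names of all fields declared by the predefined annotation classes. The (de)serializers use
+	// this set to separate native annotation fields from free-form jsonProperties content.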
+	public static HashSet<String> annotationFields = new HashSet<String>();
+	
+	static {
+		for (Class<?> cl: new Class<?>[]{Annotation.class, ProfilingAnnotation.class, ClassificationAnnotation.class,RelationshipAnnotation.class}) {
+			while (cl != Object.class) {   // process class hierarchy up to and including MetaDataObject.class
+				Field fields[] = cl.getDeclaredFields();
+				for (Field f: fields) {
+					f.setAccessible(true);
+					annotationFields.add(f.getName());
+				}
+				cl = cl.getSuperclass();
+			}			
+		}
+	}
+
+
+	// reuse object mapper for performance
+	private static ObjectMapper om = null;
+
+	static {
+		om = new ObjectMapper();
+		Module mod = createDefaultObjectMapperModule();
+		om.registerModule(mod);
+	}
+
+	public static ObjectMapper getGlobalObjectMapper() {
+		return om;
+	}
+
+	static Module createDefaultObjectMapperModule() {
+		SimpleModule mod = new SimpleModule("ODF Jackson module", Version.unknownVersion());
+		mod.addDeserializer(Annotation.class, new AnnotationDeserializer());
+		mod.addDeserializer(MetaDataObject.class, new DefaultODFDeserializer<MetaDataObject>(MetaDataObject.class, UnknownMetaDataObject.class));
+		mod.addDeserializer(DataSet.class, new DefaultODFDeserializer<DataSet>(DataSet.class, UnknownDataSet.class));
+		mod.addDeserializer(DataStore.class, new DefaultODFDeserializer<DataStore>(DataStore.class, UnknownDataStore.class));
+		mod.addDeserializer(Connection.class, new DefaultODFDeserializer<Connection>(Connection.class, UnknownConnection.class));
+		mod.addDeserializer(ConnectionInfo.class, new DefaultODFDeserializer<ConnectionInfo>(ConnectionInfo.class, UnknownConnectionInfo.class));
+		
+		mod.addSerializer(Annotation.class, new AnnotationSerializer());
+		return mod;
+
+	}
+	
+	public static JSONObject toJSONObject(Object o) throws JSONException {
+		JSONObject result;
+		try {
+			result = new JSONObject(om.writeValueAsString(o));
+			if (o instanceof Annotation) {
+				Object jsonPropsObject = result.get("jsonProperties");
+				if (jsonPropsObject instanceof JSONObject) {    // the value of jsonProperties must be of type 'String'
+					result.put("jsonProperties", ((JSONObject)jsonPropsObject).toString());	
+				}
+			}
+		} catch (JsonProcessingException e) {
+			throw new JSONException(e);
+		}
+		return result;
+	}
+
+	public static String toJSON(Object o) throws JSONException {
+		String result;
+		try {
+			result = om.writeValueAsString(o);
+			if (o instanceof Annotation) {
+				JSONObject json = new JSONObject(result);
+				Object jsonPropsObject = json.get("jsonProperties");
+				if (jsonPropsObject instanceof JSONObject) {    // the value of jsonProperties must be of type 'String'
+					json.put("jsonProperties", ((JSONObject)jsonPropsObject).toString());	
+					result = json.toString();
+				}
+			}
+		} catch (JsonProcessingException e) {
+			throw new JSONException(e);
+		}
+		return result;
+	}
+
+	public static <T> List<T> fromJSONList(String s, Class<T> cl) throws JSONException {
+		JSONArray ar = new JSONArray(s);
+		List<T> result = new ArrayList<>();
+		for (Object o : ar) {
+			JSONObject jo = (JSONObject) o;
+			T t = (T) fromJSON(jo.write(), cl);
+			result.add(t);
+		}
+		return result;
+
+	}
+
+	public static <T> List<T> fromJSONList(InputStream is, Class<T> cl) throws JSONException {
+		JSONArray ar = new JSONArray(is);
+		List<T> result = new ArrayList<>();
+		for (Object o : ar) {
+			JSONObject jo = (JSONObject) o;
+			T t = (T) fromJSON(jo.write(), cl);
+			result.add(t);
+		}
+		return result;
+	}
+
+	public static <T> T fromJSON(String s, Class<T> cl) throws JSONException {
+		T result = null;
+		try {
+			result = om.readValue(s, cl);
+		} catch (JsonProcessingException exc) {
+			// propagate JSON exception
+			throw new JSONException(exc);
+		} catch (IOException e) {
+			throw new RuntimeException(e);
+		}
+
+		return result;
+	}
+
+	public static <T> T fromJSON(InputStream is, Class<T> cl) throws JSONException {
+		return fromJSON(getInputStreamAsString(is, "UTF-8"), cl);
+	}
+
+	public static <T> T readJSONObjectFromFileInClasspath(Class<T> cl, String pathToFile, ClassLoader classLoader) {
+		if (classLoader == null) {
+			// use current classloader if not provided
+			classLoader = JSONUtils.class.getClassLoader();
+		}
+		InputStream is = classLoader.getResourceAsStream(pathToFile);
+		T result = null;
+		try {
+			result = om.readValue(is, cl);
+		} catch (IOException e) {
+			// assume that this is a severe error since the provided JSONs should be correct
+			throw new RuntimeException(e);
+		}
+
+		return result;
+	}
+
+	public static <T> T cloneJSONObject(T obj) throws JSONException {
+		// special case: use Annotation.class in case obj is an annotation subclass to ensure that the annotation deserializer is used
+		if (Annotation.class.isAssignableFrom(obj.getClass())) {
+			return (T) fromJSON(toJSON(obj), Annotation.class);
+		}
+		return fromJSON(toJSON(obj), (Class<T>) obj.getClass());
+	}
+
+	
+	public static void mergeJSONObjects(JSONObject source, JSONObject target) {
+		if (source != null && target != null) {
+			target.putAll(source);
+		}
+	}
+
+	// use this method, e.g., if you want to use JSON objects in log / trace messages
+	// and want to do serialization only if tracing is on
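+	// e.g. (illustrative): logger.log(Level.FINEST, "Request: {0}", JSONUtils.lazyJSONSerializer(request));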
+	public static Object lazyJSONSerializer(final Object jacksonObject) {
+		return new Object() {
+
+			@Override
+			public String toString() {
+				try {
+					return toJSON(jacksonObject);
+				} catch (JSONException e) {
+					return e.getMessage();
+				}
+			}
+
+		};
+	}
+
+	public static Object jsonObject4Log(final JSONObject obj) {
+		return new Object() {
+
+			@Override
+			public String toString() {
+				try {
+					return obj.write();
+				} catch (Exception e) {
+					return e.getMessage();
+				}
+			}
+
+		};
+	}
+
+	public static String getInputStreamAsString(InputStream is, String encoding) {
+		try {
+			// read the full stream into memory without re-copying the buffer on every chunk
+			ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+			byte[] temp = new byte[2048];
+			int bytesRead;
+			while ((bytesRead = is.read(temp)) != -1) {
+				buffer.write(temp, 0, bytesRead);
+			}
+			return buffer.toString(encoding);
+		} catch (IOException exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+	
+	public static <T, S> T convert(S source, Class<T> targetClass) throws JSONException {
+		return fromJSON(toJSON(source), targetClass);
+	}
+}
diff --git a/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java b/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java
new file mode 100755
index 0000000..da8d3af
--- /dev/null
+++ b/odf/odf-api/src/test/java/org/apache/atlas/odf/test/json/ODFJSONSerializationTest.java
@@ -0,0 +1,406 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.test.json;
+
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.InvalidReference;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+import org.apache.wink.json4j.JSON;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ODFJSONSerializationTest {
+
+	Logger logger = Logger.getLogger(ODFJSONSerializationTest.class.getName());
+
+	MetaDataObjectReference createNewRef() {
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId(UUID.randomUUID().toString());
+		ref.setRepositoryId("odftestrepositoryid");
+		return ref;
+	}
+
+	static class NewAnnotation extends ProfilingAnnotation {
+		String newProp;
+
+		public String getNewProp() {
+			return newProp;
+		}
+
+		public void setNewProp(String newProp) {
+			this.newProp = newProp;
+		}
+
+	}
+
+	List<MetaDataObject> createTestObjects() throws JSONException, ParseException {
+		List<MetaDataObject> testObjects = new ArrayList<>();
+
+		Column col = new Column();
+		MetaDataObjectReference colref = createNewRef();
+		col.setReference(colref);
+		col.setName("col1");
+		col.setDescription("column desc");
+		col.setDataType("theDatatype");
+
+		Table t = new Table();
+		MetaDataObjectReference tableRef = createNewRef();
+		t.setReference(tableRef);
+		t.setName("Table");
+		t.setDescription("table desc");
+
+		Database db = new Database();
+		MetaDataObjectReference dbref = createNewRef();
+		db.setReference(dbref);
+		db.setName("DB");
+		db.setDescription("db description");
+
+		JDBCConnection jdbcConn = new JDBCConnection();
+		MetaDataObjectReference jdbcConnRef = createNewRef();
+		jdbcConn.setReference(jdbcConnRef);
+		jdbcConn.setName("jdbc connection");
+		jdbcConn.setUser("theUser");
+		jdbcConn.setPassword("thePassword");
+		jdbcConn.setJdbcConnectionString("jdbc:db2:localhost:50000/SAMPLE");
+		db.setConnections(Collections.singletonList(jdbcConnRef));
+
+		ProfilingAnnotation profAnnot1 = new ProfilingAnnotation();
+		MetaDataObjectReference uaRef = createNewRef();
+		profAnnot1.setReference(uaRef);
+		profAnnot1.setProfiledObject(jdbcConnRef);
+		profAnnot1.setJsonProperties("{\"a\": \"b\"}");
+
+		ProfilingAnnotation profAnnot2 = new ProfilingAnnotation();
+		MetaDataObjectReference mdoRef = createNewRef();
+		profAnnot2.setReference(mdoRef);
+		profAnnot2.setProfiledObject(jdbcConnRef);
+		profAnnot2.setJsonProperties("{\"a\": \"b\"}");
+
+		NewAnnotation newAnnot = new NewAnnotation();
+		MetaDataObjectReference newAnnotRef = createNewRef();
+		newAnnot.setReference(newAnnotRef);
+
+		// a generic DataSet
+		UnknownDataSet ds = new UnknownDataSet();
+		ds.setName("generic data set");
+		ds.setReference(createNewRef());
+
+		MetaDataObject[] mdos = new MetaDataObject[] { db, jdbcConn, t, col, profAnnot1, profAnnot2, newAnnot, ds };
+		testObjects.addAll(Arrays.asList(mdos));
+		return testObjects;
+	}
+
+	@Test
+	public void testSerialization() throws Exception {
+		List<MetaDataObject> testObjects = createTestObjects();
+
+		for (MetaDataObject testObject : testObjects) {
+			Class<?> cl = testObject.getClass();
+			logger.info("Testing serialization / deserialization of object: " + testObject + " of class: " + cl);
+
+			String json = JSONUtils.toJSON(testObject);
+			logger.info("Serialized json: " + json);
+
+			Object objStronglyTypedClass;
+			if (testObject instanceof Annotation) { // special treatment for Annotations -> 2nd arg of fromJSON() needs to be Annotation.class
+				objStronglyTypedClass = JSONUtils.fromJSON(json, Annotation.class);
+			} else {
+				objStronglyTypedClass = JSONUtils.fromJSON(json, cl);
+			}
+			Assert.assertEquals(cl, objStronglyTypedClass.getClass());
+			String json1 = JSONUtils.toJSON(objStronglyTypedClass);
+			Assert.assertEquals(json, json1);
+
+			Object objWithGenericClass = JSONUtils.fromJSON(json, MetaDataObject.class);
+
+			Assert.assertEquals(cl, objWithGenericClass.getClass());
+			String json2 = JSONUtils.toJSON(objWithGenericClass);
+			Assert.assertEquals(json, json2);
+
+			Class<?> intermediateClasses[] = new Class<?>[] { MetaDataObject.class, DataSet.class, DataStore.class, Connection.class };
+
+			for (Class<?> intermediateClass : intermediateClasses) {
+				logger.info("Checking intermediate class: " + intermediateClass);
+				if (intermediateClass.isAssignableFrom(cl)) {
+
+					Object intermediateObject = JSONUtils.fromJSON(json, intermediateClass);
+					logger.info("Deserialized object: " + intermediateObject);
+					logger.info("Deserialized object class: " + intermediateObject.getClass());
+
+					Assert.assertTrue(intermediateClass.isAssignableFrom(intermediateObject.getClass()));
+					Assert.assertEquals(cl, intermediateObject.getClass());
+					String json3 = JSONUtils.toJSON(intermediateObject);
+					Assert.assertEquals(json, json3);
+				}
+			}
+
+		}
+	}
+
+	/**
+	 * Test serialization of an Annotation (subclass) which has both, its own fields (to be mapped to jsonProperties) and
+	 * a non-empty jsonProperties attribute holding the string representation of a Json object.
+	 */
+
+	@Test
+	public void testJsonPropertiesMerge() {
+		NewAnnotation annot = new NewAnnotation();
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId("id");
+		ref.setRepositoryId("repoid");
+		ref.setUrl("http://url");
+		annot.setProfiledObject(ref);
+		annot.setNewProp("newPropValue");
+		annot.setJsonProperties("{\"oldProp\":\"oldPropValue\"}");
+		JSONObject jo = null;
+		try {
+			jo = JSONUtils.toJSONObject(annot);
+			String jsonPropertiesString = jo.getString("jsonProperties");
+			JSONObject jo2 = new JSONObject(jsonPropertiesString);
+			Assert.assertEquals("oldPropValue", jo2.get("oldProp"));
+			Assert.assertEquals("newPropValue", jo2.get("newProp"));
+		}
+		catch (JSONException e) {
+			e.printStackTrace();
+			Assert.fail("Unexpected JSONException: " + e.getMessage());
+		}
+	}
+
+	final static private String MERGED_JSON = "{" +
+			"\"analysisRun\":null," +
+			"\"summary\":null," +
+			"\"reference\":null," +
+			"\"originRef\":null," +
+			"\"replicaRefs\":null," +
+			"\"javaClass\":\"org.apache.atlas.odf.test.json.ODFJSONSerializationTest$NewAnnotation\"," +
+			"\"jsonProperties\":\"{" +
+			   "\\\"newProp\\\":\\\"newPropValue\\\"," +
+			   "\\\"oldProp\\\":\\\"oldPropValue\\\"" +
+			   "}\"," +
+			"\"name\":null," +
+			"\"annotationType\":\"NewAnnotation\"," +
+			"\"description\":null," +
+			"\"profiledObject\":{" +
+			   "\"repositoryId\":\"repoid\"," +
+			   "\"id\":\"id\"," +
+			   "\"url\":\"http://url\"}" +
+	        "}";
+
+	/**
+	 * Test deserialization of a Json object which has fields in its jsonProperties that can not be mapped to native fields of
+	 * the target class (= value of javaClass field). These and only these remain as fields in the text encoded Json object
+	 * stored in the jsonProperties field of the result.
+	 */
+
+	@Test
+	@Ignore
+	public void testJsonPropertiesUnmerge() throws Exception {
+		logger.info("Deserializing JSON: " + MERGED_JSON);
+		Annotation annot = JSONUtils.fromJSON(MERGED_JSON, Annotation.class);
+		Assert.assertTrue(annot instanceof NewAnnotation);
+		NewAnnotation newAnnot = (NewAnnotation) annot;
+		Assert.assertEquals("newPropValue", newAnnot.getNewProp());
+		JSONObject props = (JSONObject) JSON.parse(annot.getJsonProperties());
+
+		Assert.assertNotNull(props.get("oldProp"));
+		Assert.assertEquals("oldPropValue", props.get("oldProp"));
+
+		JSONObject jo = JSONUtils.toJSONObject(annot);
+		Assert.assertEquals(MERGED_JSON, jo.toString());
+	}
+
+	final private static String PROFILING_ANNOTATION_JSON = "{" +
+			"\"profiledObject\": null," +
+			"\"annotationType\": \"MySubType1\"," +
+			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType1\"," +
+			"\"analysisRun\": \"bla\"," +
+			"\"newProp1\": 42," +
+			"\"newProp2\": \"hi\"," +
+			"\"newProp3\": \"hello\"" +
+		"}";
+
+	final private static String CLASSIFICATION_ANNOTATION_JSON = "{" +
+			"\"classifyingObject\": null," +
+			"\"classifiedObject\": null," +
+			"\"annotationType\": \"MySubType2\"," +
+			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType2\"," +
+			"\"analysisRun\": \"bla\"," +
+			"\"newProp1\": 42," +
+			"\"newProp2\": \"hi\"," +
+			"\"newProp3\": \"hello\"" +
+		"}";
+
+	final private static String RELATIONSHIP_ANNOTATION_JSON = "{" +
+			"\"relatedObjects\": null," +
+			"\"annotationType\": \"MySubType3\"," +
+			"\"javaClass\": \"org.apache.atlas.odf.core.integrationtest.metadata.atlas.MySubType3\"," +
+			"\"analysisRun\": \"bla\"," +
+			"\"newProp1\": 42," +
+			"\"newProp2\": \"hi\"," +
+			"\"newProp3\": \"hello\"" +
+		"}";
+
+	 /**
+	  *  Replacement for AtlasAnnotationTypeDefinitionCreatTest
+	  */
+
+	@Test
+	public void testSimpleAnnotationPrototypeCreation() throws Exception {
+		logger.info("Annotation string: " + PROFILING_ANNOTATION_JSON);
+		Annotation annot = JSONUtils.fromJSON(PROFILING_ANNOTATION_JSON, Annotation.class);
+		logger.info("Annotation: " + PROFILING_ANNOTATION_JSON);
+		Assert.assertTrue(annot instanceof ProfilingAnnotation);
+
+		logger.info("Annotation string: " + CLASSIFICATION_ANNOTATION_JSON);
+		annot = JSONUtils.fromJSON(CLASSIFICATION_ANNOTATION_JSON, Annotation.class);
+		logger.info("Annotation: " + CLASSIFICATION_ANNOTATION_JSON);
+		Assert.assertTrue(annot instanceof ClassificationAnnotation);
+
+		logger.info("Annotation string: " + RELATIONSHIP_ANNOTATION_JSON);
+		annot = JSONUtils.fromJSON(RELATIONSHIP_ANNOTATION_JSON, Annotation.class);
+		logger.info("Annotation: " + RELATIONSHIP_ANNOTATION_JSON);
+		Assert.assertTrue(annot instanceof RelationshipAnnotation);
+	}
+
+	@Test
+	public void testUnretrievedReference() throws Exception {
+		String repoId = "SomeRepoId";
+		Column col = new Column();
+		col.setName("name");
+		col.setReference(InvalidReference.createInvalidReference(repoId));
+
+		String json = JSONUtils.toJSON(col);
+		Column col2 = JSONUtils.fromJSON(json, Column.class);
+		Assert.assertTrue(InvalidReference.isInvalidRef(col2.getReference()));
+
+		Database db = new Database();
+		db.setName("database");
+
+		JSONUtils.toJSON(db);
+
+		db.setConnections(InvalidReference.createInvalidReferenceList(repoId));
+
+		Database db2 = JSONUtils.fromJSON(JSONUtils.toJSON(db), Database.class);
+		Assert.assertTrue(InvalidReference.isInvalidRefList(db2.getConnections()));
+	}
+
+	@Test
+	public void testExtensibleDiscoveryServiceEndpoints() throws Exception {
+		DiscoveryServiceProperties dsprops = new DiscoveryServiceProperties();
+		dsprops.setId("theid");
+		dsprops.setName("thename");
+
+		DiscoveryServiceEndpoint ep = new DiscoveryServiceEndpoint();
+		ep.setRuntimeName("newruntime");
+		ep.set("someKey", "someValue");
+		dsprops.setEndpoint(ep);
+
+		String dspropsJSON = JSONUtils.toJSON(dsprops);
+		logger.info("Discovery service props JSON: " +dspropsJSON);
+
+		DiscoveryServiceProperties deserProps = JSONUtils.fromJSON(dspropsJSON, DiscoveryServiceProperties.class);
+		Assert.assertNotNull(deserProps);
+		Assert.assertEquals("theid", deserProps.getId());
+		Assert.assertEquals("thename", deserProps.getName());
+		Assert.assertNotNull(deserProps.getEndpoint());
+		Assert.assertTrue(deserProps.getEndpoint() instanceof DiscoveryServiceEndpoint);
+		Assert.assertTrue(deserProps.getEndpoint().getClass().equals(DiscoveryServiceEndpoint.class));
+		DiscoveryServiceEndpoint deserEP = (DiscoveryServiceEndpoint) deserProps.getEndpoint();
+		Assert.assertEquals("newruntime", deserEP.getRuntimeName());
+		Assert.assertEquals("someValue", deserEP.get().get("someKey"));
+	}
+
+	@Test
+	public void testMetaDataCache() {
+		MetaDataCache cache = new MetaDataCache();
+
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId("id");
+		ref.setRepositoryId("repositoryId");
+		DataFile dataFile = new DataFile();
+		dataFile.setName("dataFile");
+		dataFile.setEncoding("encoding");
+		dataFile.setReference(ref);
+
+		List<MetaDataObjectReference> refList = new ArrayList<MetaDataObjectReference>();
+		refList.add(ref);
+		StoredMetaDataObject storedObject = new StoredMetaDataObject(dataFile);
+		HashMap<String, List<MetaDataObjectReference>> referenceMap = new HashMap<String, List<MetaDataObjectReference>>();
+		referenceMap.put("id", refList);
+		storedObject.setReferencesMap(referenceMap);
+		List<StoredMetaDataObject> metaDataObjects = new ArrayList<StoredMetaDataObject>();
+		metaDataObjects.add(storedObject);
+		cache.setMetaDataObjects(metaDataObjects);
+
+		Connection con = new JDBCConnection();
+		con.setName("connection");
+		JDBCConnectionInfo conInfo = new JDBCConnectionInfo();
+		conInfo.setConnections(Collections.singletonList(con));
+		conInfo.setAssetReference(ref);
+		conInfo.setTableName("tableName");
+		List<ConnectionInfo> connectionInfoObjects = new ArrayList<ConnectionInfo>();
+		connectionInfoObjects.add(conInfo);
+		cache.setConnectionInfoObjects(connectionInfoObjects);
+
+		try {
+			String serializedCache = JSONUtils.toJSON(cache);
+			logger.info("Serialized metadata cache JSON: " + serializedCache);
+			MetaDataCache deserializedCache = JSONUtils.fromJSON(serializedCache, MetaDataCache.class);
+			Assert.assertEquals("dataFile", deserializedCache.getMetaDataObjects().get(0).getMetaDataObject().getName());
+			Assert.assertEquals("encoding", ((DataFile) deserializedCache.getMetaDataObjects().get(0).getMetaDataObject()).getEncoding());
+			Assert.assertEquals("connection", deserializedCache.getConnectionInfoObjects().get(0).getConnections().get(0).getName());
+			Assert.assertEquals("tableName", ((JDBCConnectionInfo) deserializedCache.getConnectionInfoObjects().get(0)).getTableName());
+			Assert.assertEquals("repositoryId", deserializedCache.getMetaDataObjects().get(0).getReferenceMap().get("id").get(0).getRepositoryId());
+		}
+		catch (JSONException e) {
+			e.printStackTrace();
+			Assert.fail("Unexpected JSONException: " + e.getMessage());
+		}
+	}
+
+
+}
diff --git a/odf/odf-archetype-discoveryservice/.gitignore b/odf/odf-archetype-discoveryservice/.gitignore
new file mode 100755
index 0000000..67c976b
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/.gitignore
@@ -0,0 +1,17 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
diff --git a/odf/odf-archetype-discoveryservice/pom.xml b/odf/odf-archetype-discoveryservice/pom.xml
new file mode 100755
index 0000000..c9c2aed
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/pom.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-archetype-discoveryservice</artifactId>
+	<packaging>maven-archetype</packaging>
+
+	<description>The ODF Maven archetype for discovery services</description>
+
+	<properties>
+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+	</properties>
+
+	<build>
+		<extensions>
+			<extension>
+				<groupId>org.apache.maven.archetype</groupId>
+				<artifactId>archetype-packaging</artifactId>
+				<version>2.4</version>
+			</extension>
+		</extensions>
+
+		<pluginManagement>
+			<plugins>
+				<plugin>
+					<artifactId>maven-archetype-plugin</artifactId>
+					<version>2.4</version>
+				</plugin>
+			</plugins>
+		</pluginManagement>
+	</build>
+
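+	<!-- A hypothetical usage sketch (archetype coordinates taken from this POM; the
+	     target groupId/artifactId are placeholders):
+	     mvn archetype:generate -DarchetypeGroupId=org.apache.atlas.odf \
+	         -DarchetypeArtifactId=odf-archetype-discoveryservice -DarchetypeVersion=1.2.0-SNAPSHOT \
+	         -DgroupId=com.example -DartifactId=my-discovery-service
+	-->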
+</project>
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml b/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml
new file mode 100755
index 0000000..9848e46
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/META-INF/maven/archetype.xml
@@ -0,0 +1,27 @@
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<archetype>
+  <id>odf-archetype-discoveryservice-jar</id>
+  <sources>
+    <source>src/main/java/MyAnnotation.java</source>
+    <source>src/main/java/MyDiscoveryService.java</source>
+  </sources>
+  <resources>
+    <resource>src/main/resources/META-INF/odf/odf-services.json</resource>
+  </resources>
+  <testSources>
+    <source>src/test/java/MyDiscoveryServiceTest.java</source>
+  </testSources>
+</archetype>
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml
new file mode 100755
index 0000000..0ada9e8
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/pom.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<groupId>${groupId}</groupId>
+	<artifactId>${artifactId}</artifactId>
+	<version>${version}</version>
+	<packaging>jar</packaging>
+
+	<properties>
+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+</project>
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java
new file mode 100755
index 0000000..8ce0d2f
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyAnnotation.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ${package};
+
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+
+public class MyAnnotation extends ProfilingAnnotation {
+
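+	// The annotation's payload is defined by bean-style properties like the one below;
+	// ODF serializes them (presumably to JSON) when the annotation is stored.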
+	private String myProperty;
+
+	public String getMyProperty() {
+		return myProperty;
+	}
+
+	public void setMyProperty(String myValue) {
+		this.myProperty = myValue;
+	}
+
+}
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java
new file mode 100755
index 0000000..a07ccdb
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/java/MyDiscoveryService.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ${package};
+
+import java.util.Collections;
+import java.util.Date;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
+/**
+ * A simple synchronous discovery service that creates one annotation for the data set it analyzes.
+ *
+ */
+public class MyDiscoveryService extends SyncDiscoveryServiceBase {
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		// 1. create an annotation that annotates the data set object passed in the request
+		MyAnnotation annotation = new MyAnnotation();
+		annotation.setProfiledObject(request.getDataSetContainer().getDataSet().getReference());
+		// set our custom property "myProperty" to some string
+		annotation.setMyProperty("My property was created on " + new Date());
+
+		// 2. create a response with our annotation created above
+		return createSyncResponse( //
+				ResponseCode.OK, // Everything works OK
+				"Everything worked", // human-readable message
+				Collections.singletonList(annotation) // new annotations
+		);
+	}
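+
+	// A minimal, hypothetical sketch of how a service like this is invoked through the
+	// ODF API (class names from odf-api elsewhere in this patch; the data set reference
+	// and the service id are assumptions):
+	//
+	//   AnalysisRequest req = new AnalysisRequest();
+	//   req.setDataSets(Collections.singletonList(dataSetRef));
+	//   req.setDiscoveryServiceSequence(Collections.singletonList("my.group.my-service.MyDiscoveryService"));
+	//   AnalysisResponse resp = new ODFFactory().create().getAnalysisManager().runAnalysis(req);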
+
+}
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json
new file mode 100755
index 0000000..e90ce7b
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/main/resources/META-INF/odf/odf-services.json
@@ -0,0 +1,11 @@
+[
+  {
+	"id": "${groupId}.${artifactId}.MyDiscoveryService",
+	"name": "My service",
+	"description": "My service creates my annotation for a data set",
+	"endpoint": {
+		"runtimeName": "Java",
+		"className": "${package}.MyDiscoveryService"
+	}
+  }
+]
diff --git a/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java
new file mode 100755
index 0000000..bc585d2
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/main/resources/archetype-resources/src/test/java/MyDiscoveryServiceTest.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ${package};
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit test template for discovery service
+ */
+public class MyDiscoveryServiceTest {
+
+	@Test
+	public void test() throws Exception {
+		Assert.assertTrue(true);
+	}
+}
diff --git a/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties
new file mode 100755
index 0000000..9fbb593
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/archetype.properties
@@ -0,0 +1,23 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+archetype.groupId=org.apache.atlas.odf
+archetype.artifactId=odf-archetype-discoveryservice-jar
+archetype.version=1.2.0-SNAPSHOT
+
+groupId=jg1
+artifactId=ja1
+version=0.1
+package=odf.j.p1.p2
+
diff --git a/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt
new file mode 100755
index 0000000..31ed2f8
--- /dev/null
+++ b/odf/odf-archetype-discoveryservice/src/test/resources/projects/it1/goal.txt
@@ -0,0 +1 @@
+clean verify
diff --git a/odf/odf-atlas/.gitignore b/odf/odf-atlas/.gitignore
new file mode 100755
index 0000000..174a0a7
--- /dev/null
+++ b/odf/odf-atlas/.gitignore
@@ -0,0 +1,20 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+.DS_Store
+derby.log
diff --git a/odf/odf-atlas/atlasconfig/jetty-web.xml b/odf/odf-atlas/atlasconfig/jetty-web.xml
new file mode 100755
index 0000000..66ec730
--- /dev/null
+++ b/odf/odf-atlas/atlasconfig/jetty-web.xml
@@ -0,0 +1,24 @@
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<Configure class="org.eclipse.jetty.webapp.WebAppContext">
+	<Get name="securityHandler">
+		<Set name="loginService">
+			<New class="org.eclipse.jetty.security.HashLoginService">
+				<Set name="name">ODF Realm</Set>
+				<Set name="config"><SystemProperty name="atlas.home" default="."/>/conf/realm.properties</Set>
+			</New>
+		</Set>
+	</Get>
+</Configure>
diff --git a/odf/odf-atlas/atlasconfig/realm.properties b/odf/odf-atlas/atlasconfig/realm.properties
new file mode 100755
index 0000000..0d57c4a
--- /dev/null
+++ b/odf/odf-atlas/atlasconfig/realm.properties
@@ -0,0 +1,24 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Credentials for Atlas basic authentication
+#
+# Format:
+# <username>: <password>[,<rolename> ...]
+#
+# Password is stored in obfuscated format.
+# Re-generate password using the org.eclipse.jetty.util.security.Password class in the jetty lib folder.
+# Example:
+# cd jetty-distribution-<version>/lib
+# java -cp jetty-util-<version>.jar org.eclipse.jetty.util.security.Password <plain password>
+atlas: OBF:1v1p1s3m1w1s1wtw1u3019q71u2a1wui1w1q1s3g1v2p,user
diff --git a/odf/odf-atlas/build_atlas.xml b/odf/odf-atlas/build_atlas.xml
new file mode 100755
index 0000000..8b6de87
--- /dev/null
+++ b/odf/odf-atlas/build_atlas.xml
@@ -0,0 +1,265 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="build_atlas">
+	<dirname property="script.basedir" file="${ant.file.build_atlas}" />
+	<property name="atlas-dir" value="apache-atlas-${atlas.version}" />
+	<!-- Properties provided by pom.xml: -->
+	<!-- <property name="atlas-unpack-dir" value="" /> -->
+	<!-- <property name="atlas.version" value="" /> -->
+
+	<property name="atlas-archive" value="/tmp/${atlas-dir}-bin.zip" />
+
+	<condition property="is-windows">
+		<os family="windows">
+		</os>
+	</condition>
+
+	<condition property="is-unix">
+		<os family="unix">
+		</os>
+	</condition>
+
+	<condition property="is-mac">
+		<os family="mac">
+		</os>
+	</condition>
+
+	<condition property="atlas-zip-not-found">
+		<not>
+			<available file="${atlas-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="atlas-unpacked">
+	   <available file="${atlas-unpack-dir}/${atlas-dir}/bin/atlas_start.py"/>
+    </condition>
+
+	<condition property="atlas-running">
+		<available file="${atlas-unpack-dir}/${atlas-dir}/logs/atlas.pid"/>
+	</condition>
+
+	<condition property="running-build-process">
+		<equals arg1="${atlas-unpack-dir}" arg2="/tmp"/>
+	</condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-atlas" if="atlas-zip-not-found">
+		<echo message="Downloading Apache Atlas 0.7-incubating-release. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<!-- Make sure to update text message when moving to a new Atlas release / revision -->
+		<get verbose="true" src="https://ibm.box.com/shared/static/ftwi0wlpjtyv3nnvyh354epayqfwynsn.zip" dest="${atlas-archive}" />
+		<echo message="Atlas downloaded" />
+	</target>
+
+	<target name="unzip-atlas" unless="atlas-unpacked">
+		<antcall target="download-atlas"/>
+		<echo message="Installing Atlas test instance" />
+		<echo message="Deleting ${atlas-unpack-dir}/${atlas-dir}" />
+		<delete dir="${atlas-unpack-dir}/${atlas-dir}" failonerror="false" />
+		<echo message="deleted" />
+		<chmod file="${atlas-archive}" perm="755" os="unix,mac"/>
+		<unzip src="${atlas-archive}" dest="${atlas-unpack-dir}" />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="stop-atlas" if="atlas-unpacked">
+		<echo message="Stopping atlas server if it exists" />
+		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
+			<env key="JAVA_HOME" value="${java.home}" />
+			<arg value="atlas_stop.py" />
+		</exec>
+		<sleep seconds="10" />
+	</target>
+
+	<target name="ensure-atlas-stopped" depends="print-info" unless="use.running.atlas">
+		<echo message="Ensure Atlas is stopped..."/>
+		<antcall target="stop-atlas"/>
+		<delete file="${atlas-unpack-dir}/${atlas-dir}/logs/atlas.pid"/>
+		<echo message="Atlas is stopped."/>
+	</target>
+
+	<target name="remove-atlas-dir" depends="ensure-atlas-stopped" if="running-build-process">
+		<echo message="Resetting atlas data"/>
+		<delete dir="/tmp/${atlas-dir}" />
+		<echo message="Atlas directory deleted"/>
+	</target>
+
+	<target name="reset-derby-data">
+		<echo message="Resetting derby DB"/>
+		<delete dir="/tmp/odf-derby" />
+	</target>
+
+	<target name="restart-atlas-on-windows" if="is-windows">
+		<antcall target="start-atlas"/>
+		<antcall target="stop-atlas"/>
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="start-atlas">
+		<echo message="Starting atlas server" />
+		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
+			<env key="JAVA_HOME" value="${java.home}/.." />
+			<arg value="atlas_start.py" />
+		</exec>
+		<echo message="Waiting for Atlas Server to start..." />
+		<waitfor maxwait="60" maxwaitunit="second">
+			<socket server="localhost" port="21443" />
+		</waitfor>
+	</target>
+
+	<target name="check-atlas-url">
+		<fail>
+			<condition>
+				<not>
+					<socket server="localhost" port="21443" />
+				</not>
+			</condition>
+		</fail>
+	</target>
+
+	<target name="prepare-atlas" unless="atlas-running">
+		<antcall target="unzip-atlas"/>
+		<antcall target="enable-atlas-ssl"/>
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="import-atlas-sampledata-win" if="is-windows">
+		<echo message="Importing sample data" />
+		<exec executable="cmd">
+			<env key="JAVA_HOME" value="${java.home}" />
+			<arg value="/c" />
+			<arg value="${atlas-unpack-dir}/${atlas-dir}/bin/quick_start.py" />
+		</exec>
+
+		<echo message="Atlas test instance brought up" />
+	</target>
+
+	<target name="import-atlas-sampledata-unix" if="is-unix">
+		<echo message="Importing sample data" />
+		<exec dir="${atlas-unpack-dir}/${atlas-dir}/bin" executable="python">
+			<env key="JAVA_HOME" value="${java.home}" />
+			<arg value="quick_start.py" />
+		</exec>
+
+		<echo message="Atlas test instance brought up" />
+	</target>
+
+	<target name="import-atlas-sampledata" depends="import-atlas-sampledata-win,import-atlas-sampledata-unix">
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="select-atlas-config-file-windows" if="is-windows">
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_windows" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
+		<echo message="Using atlas SSL configuration for Windows." />
+	</target>
+
+	<target name="select-atlas-config-file-mac" if="is-mac">
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_mac" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
+		<echo message="Using atlas SSL configuration for Mac OS." />
+	</target>
+
+	<target name="select-atlas-config-file-unix" if="is-unix">
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_linux" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" overwrite="true"/>
+		<echo message="Using atlas SSL configuration for Unix." />
+	</target>
+
+	<target name="select-atlas-config-file" depends="select-atlas-config-file-unix,select-atlas-config-file-windows,select-atlas-config-file-mac">
+	</target>
+
+	<target name="unquote-colons-in-atlas-config-file">
+		<!-- The following replacement is needed because the Ant propertyfile task escapes colons and backslashes -->
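+		<!-- For example, the propertyfile task writes "jceks\://file/..." where the config must contain "jceks://file/..." -->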
+		<replace file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
+			<replacetoken>\:</replacetoken>
+			<replacevalue>:</replacevalue>
+		</replace>
+		<replace file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
+			<replacetoken>\\</replacetoken>
+			<replacevalue>\</replacevalue>
+		</replace>
+	</target>
+
+	<target name="enable-atlas-ssl">
+		<!-- For Atlas security features see: http://atlas.incubator.apache.org/Security.html -->
+		<echo message="Updating atlas-application.properties file..." />
+		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
+			<entry  key="cert.stores.credential.provider.path" value="jceks://file/${sys:atlas.home}/conf/keystore_openjdk.jceks"/>
+			<entry  key="atlas.enableTLS" value="true"/>
+			<entry  key="truststore.file" value="${sys:atlas.home}/conf/keystore_openjdk.jks"/>
+			<entry  key="keystore.file" value="${sys:atlas.home}/conf/keystore_openjdk.jks"/>
+			<entry  key="atlas.server.https.port" value="21443"/>
+			<entry  key="atlas.DeleteHandler.impl" value="org.apache.atlas.repository.graph.HardDeleteHandler"/>
+			<entry  key="atlas.TypeCache.impl" value="org.apache.atlas.repository.typestore.StoreBackedTypeCache"/>
+		</propertyfile>
+		<antcall target="unquote-colons-in-atlas-config-file"/>
+		<!-- Keep this version of the config file for Mac (using oracle/open jdk) -->
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_mac" overwrite="true"/>
+
+		<!-- Create separate version of config file for Linux (using ibm jdk) -->
+		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
+			<entry  key="cert.stores.credential.provider.path" value="jceks://file/${sys:atlas.home}/conf/keystore_ibmjdk.jceks"/>
+			<entry  key="truststore.file" value="${sys:atlas.home}/conf/keystore_ibmjdk.jks"/>
+			<entry  key="keystore.file" value="${sys:atlas.home}/conf/keystore_ibmjdk.jks"/>
+		</propertyfile>
+		<antcall target="unquote-colons-in-atlas-config-file"/>
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_linux" overwrite="true"/>
+
+		<!-- Create separate version of config file for Windows (using ibm jdk and hardcoded credential provider file (issue #94)) -->
+		<propertyfile file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties">
+			<entry  key="cert.stores.credential.provider.path" value="jceks://file/C\:/tmp/${atlas-dir}/conf/keystore_ibmjdk.jceks"/>
+		</propertyfile>
+		<antcall target="unquote-colons-in-atlas-config-file"/>
+		<copy file="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties" tofile="${atlas-unpack-dir}/${atlas-dir}/conf/atlas-application.properties_windows" overwrite="true"/>
+
+		<!-- keystore.jceks file is stored in Box@IBM - Re-generate the file using Atlas command bin/cputil.sh -->
+		<!-- Note that the IBM JDK uses a different keystore format than the Oracle/OpenJDK, therefore a separate version has to be generated for each JDK -->
+		<get verbose="true" src="https://ibm.box.com/shared/static/uyzqeayk5ut5f5fqnlvm8nhn9ixb642d.jceks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_openjdk.jceks" />
+		<get verbose="true" src="https://ibm.box.com/shared/static/ibopoyukw7uhbt83a1zu33nwvnamht3j.jceks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_ibmjdk.jceks" />
+		<!-- keystore.jks file is stored in Box@IBM - Re-generate the file using the Java keytool -->
+		<!-- command: keytool -genkey -alias myatlas -keyalg RSA -keystore /tmp/atlas-security/keystore.jks -keysize 2048 -->
+		<!-- Note that the IBM JDK uses a different keystore format than the Oracle/OpenJDK, therefore a separate version has to be generated for each JDK -->
+		<get verbose="true" src="https://ibm.box.com/shared/static/odnmhqua5sdue03z43vqsv0lp509ov70.jks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_openjdk.jks" />
+		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${atlas-unpack-dir}/${atlas-dir}/conf/keystore_ibmjdk.jks" />
+
+		<antcall target="select-atlas-config-file"/>
+		<echo message="Atlas SSL has been enabled." />
+		<!-- On Windows, Atlas needs to be restarted once more for the Kafka queues to come up properly -->
+		<antcall target="restart-atlas-on-windows" />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+	<target name="print-info" if="use.running.atlas">
+		<echo message="Don't start/stop Atlas because use.running.atlas is set" />
+	</target>
+
+	<target name="clean-atlas" depends="print-info" unless="use.running.atlas">
+		<echo message="Cleaning Atlas" />
+		<antcall target="remove-atlas-dir"/>
+		<antcall target="reset-derby-data"/>
+	</target>
+
+	<target name="ensure-atlas-running" depends="print-info" unless="use.running.atlas">
+		<echo message="Ensure that Atlas is running" />
+		<antcall target="prepare-atlas" />
+		<antcall target="start-atlas"/>
+		<antcall target="check-atlas-url"/>
+		<echo message="Atlas is running" />
+	</target>
+
+</project>
diff --git a/odf/odf-atlas/pom.xml b/odf/odf-atlas/pom.xml
new file mode 100755
index 0000000..cc714e6
--- /dev/null
+++ b/odf/odf-atlas/pom.xml
@@ -0,0 +1,216 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns:if="ant:if">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-atlas</artifactId>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-store</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-spark</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-spark</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.derby</groupId>
+			<artifactId>derby</artifactId>
+			<version>10.12.1.1</version>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-failsafe-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+						<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
+						<atlas.url>${atlas.url}</atlas.url>
+						<atlas.user>${atlas.user}</atlas.user>
+						<atlas.password>${atlas.password}</atlas.password>
+					</systemPropertyVariables>
+					<dependenciesToScan>
+						<dependency>org.apache.atlas.odf:odf-core</dependency>
+					</dependenciesToScan>
+					<includes>
+						<include>**/integrationtest/**/**.java</include>
+					</includes>
+				</configuration>
+				<executions>
+					<execution>
+						<id>integration-test</id>
+						<goals>
+							<goal>integration-test</goal>
+						</goals>
+					</execution>
+					<execution>
+						<id>verify</id>
+						<goals>
+							<goal>verify</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+						<atlas.url>${atlas.url}</atlas.url>
+						<atlas.user>${atlas.user}</atlas.user>
+						<atlas.password>${atlas.password}</atlas.password>
+					</systemPropertyVariables>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<inherited>false</inherited>
+						<id>clean-atlas</id>
+						<phase>clean</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="atlas-unpack-dir" value="/tmp"/>
+								<property name="atlas.version" value="${atlas.version}"/>
+								<ant antfile="build_atlas.xml" target="clean-atlas"/>
+							</target>
+						</configuration>
+					</execution>
+					<execution>
+						<id>ensure-atlas-running</id>
+						<phase>process-test-classes</phase>
+						<!-- <phase>pre-integration-test</phase> -->
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target unless="skipTests">
+								<property name="atlas-unpack-dir" value="/tmp" />
+								<property name="atlas.version" value="${atlas.version}" />
+								<ant antfile="build_atlas.xml" target="ensure-atlas-running"></ant>
+							</target>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
+	<profiles>
+		<profile>
+			<id>atlas</id>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-antrun-plugin</artifactId>
+						<version>1.8</version>
+						<executions>
+							<!-- Start Atlas even when the tests are skipped so that it is available for the test environment -->
+							<execution>
+								<id>ensure-atlas-running</id>
+								<phase>process-test-classes</phase>
+								<!-- <phase>pre-integration-test</phase> -->
+								<goals>
+									<goal>run</goal>
+								</goals>
+								<configuration>
+									<target unless="skipTests">
+										<property name="atlas-unpack-dir" value="/tmp" />
+										<property name="atlas.version" value="${atlas.version}" />
+										<ant antfile="build_atlas.xml" target="ensure-atlas-running"></ant>
+									</target>
+								</configuration>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
+</project>
diff --git a/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java
new file mode 100755
index 0000000..04a1bc3
--- /dev/null
+++ b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasMetadataStore.java
@@ -0,0 +1,842 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata.atlas;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.GeneralSecurityException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.connectivity.RESTClientManager;
+import org.apache.atlas.odf.api.metadata.*;
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.metadata.SampleDataHelper;
+import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
+import org.apache.atlas.odf.core.metadata.WritableMetadataStoreBase;
+import org.apache.atlas.odf.core.metadata.WritableMetadataStoreUtils;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.entity.ContentType;
+import org.apache.wink.json4j.JSONArray;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
+import org.apache.atlas.odf.api.metadata.AtlasMetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.RESTMetadataStoreHelper;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import com.google.common.collect.Lists;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
+
+// TODO properly escape all URLs that are constructed via string concatenation
+
+/**
+ * A MetadataStore implementation for accessing metadata stored in an Atlas instance.
+ */
+public class AtlasMetadataStore extends WritableMetadataStoreBase implements MetadataStore, WritableMetadataStore {
+	private Logger logger = Logger.getLogger(AtlasMetadataStore.class.getName());
+
+	private static HashMap<String, StoredMetaDataObject> objectStore; // Not actually used but required to meet needs of InternalMetadataStoreBase
+	protected LinkedHashMap<String, StoredMetaDataObject> stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
+	protected static Object accessLock = new Object();
+
+	private String url;
+
+	private String storeId;
+
+	private RESTClientManager restClient;
+
+	private AtlasModelBridge modelBridge;
+
+	static String ATLAS_API_INFIX = "/api/atlas";
+
+	private void constructThis(String url, String user, String password) throws URISyntaxException {
+		this.url = url;
+		this.storeId = "atlas:" + url;
+		this.restClient = new RESTClientManager(new URI(url), user, password);
+		this.modelBridge = new AtlasModelBridge(this);
+	}
+
+	public AtlasMetadataStore() throws URISyntaxException {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		String atlasURL = env.getProperty("atlas.url");
+		String atlasUser = env.getProperty("atlas.user");
+		String atlasPassword = env.getProperty("atlas.password");
+		if ((atlasURL == null) || atlasURL.isEmpty() || (atlasUser == null) || atlasUser.isEmpty() || (atlasPassword == null) || atlasPassword.isEmpty())  {
+			throw new RuntimeException("The system properties \"atlas.url\", \"atlas.user\", and \"atlas.password\" must be set.");
+		}
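+		// In the builds in this patch, these values arrive as JVM system properties,
+		// e.g. (values are illustrative only):
+		//   -Datlas.url=https://localhost:21443 -Datlas.user=atlas -Datlas.password=<encrypted password>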
+		constructThis(atlasURL, atlasUser, Encryption.decryptText(atlasPassword));
+	}
+
+	protected Object getAccessLock() {
+		return accessLock;
+	}
+
+	// Not actually used but required to meet needs of InternalMetadataStoreBase
+	protected HashMap<String, StoredMetaDataObject> getObjects() {
+		return objectStore;
+	}
+
+	protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects() {
+		return stagedObjects;
+	}
+
+	public static final int TIMEOUT = 2000;
+
+	static Object ensureTypesLock = new Object();
+
+	public void ensureODFTypesExist() {
+		synchronized (ensureTypesLock) {
+			try {
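+				// Probe for the ODF base type: HTTP 200 means the ODF model is already
+				// registered in Atlas, 404 means it must be created from the bundled JSON model below.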
+				String typesTestURI = this.url + ATLAS_API_INFIX + "/types/MetaDataObject";
+				Executor executor = this.restClient.getAuthenticatedExecutor();
+				HttpResponse httpResponse = executor.execute(Request.Get(typesTestURI)).returnResponse();
+
+				StatusLine statusLine = httpResponse.getStatusLine();
+				int statusCode = statusLine.getStatusCode();
+				if (statusCode == HttpStatus.SC_OK) {
+					return;
+				}
+				if (statusCode != HttpStatus.SC_NOT_FOUND) {
+					throw new MetadataStoreException("An error occurred when checking for Atlas types. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
+				}
+				// now create types
+				InputStream is = this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json");
+				Request createTypesRequest = Request.Post(this.url + ATLAS_API_INFIX + "/types");
+				createTypesRequest.bodyStream(is, ContentType.APPLICATION_JSON);
+				httpResponse = executor.execute(createTypesRequest).returnResponse();
+				statusLine = httpResponse.getStatusLine();
+				statusCode = statusLine.getStatusCode();
+				if (statusCode != HttpStatus.SC_CREATED) {
+					throw new MetadataStoreException("An error occurred while creating ODF types in Atlas. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
+				}
+			} catch (GeneralSecurityException | IOException e) {
+				logger.log(Level.FINE, "An unexpected exception occurred while connecting to Atlas", e);
+				throw new MetadataStoreException(e);
+			}
+		}
+
+	}
+
+	private void checkConnectivity() {
+		ensureODFTypesExist();
+	}
+
+	/* Filter out all types that already exist.
+	 * This is necessary because trying to create a type multiple times
+	 * leads to a 503 error after Atlas is restarted, with an error saying
+	 * "Type extends super type multiple times".
+	 *
+	 * Returns true if some filtering took place.
+	 *
+	 * Note: Trying to remove the super types from the request doesn't work either.
+	 */
+	boolean filterExistingTypes(JSONObject atlasTypeDefinitions, String typeProperty) throws GeneralSecurityException, IOException {
+		boolean filterWasApplied = false;
+		JSONArray types = (JSONArray) atlasTypeDefinitions.opt(typeProperty);
+		JSONArray newTypes = new JSONArray();
+		for (Object typeObj : types) {
+			JSONObject type = (JSONObject) typeObj;
+
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			String typeName = (String) type.opt("typeName");
+			if (typeName != null) {
+				Request checkTypeRequest = Request.Get(this.url + ATLAS_API_INFIX + "/types/" + typeName);
+				HttpResponse httpResponse = executor.execute(checkTypeRequest).returnResponse();
+				StatusLine statusLine = httpResponse.getStatusLine();
+				int statusCode = statusLine.getStatusCode();
+				if (statusCode != HttpStatus.SC_NOT_FOUND) {
+					// type already exists, don't create it
+					filterWasApplied = true;
+					logger.log(Level.FINE, "Atlas type ''{0}'' already exists, don't create it again", typeName);
+				} else {
+					newTypes.add(type);
+				}
+			}
+		}
+
+		try {
+			atlasTypeDefinitions.put(typeProperty, newTypes);
+		} catch (JSONException e) {
+			throw new RuntimeException(e); // should never happen as only proper JSONObjects are used
+		}
+		return filterWasApplied;
+	}
+
+	boolean isInvalidTypeRequest(JSONObject atlasTypeDefinition) {
+		return ((JSONArray) atlasTypeDefinition.opt("structTypes")).isEmpty() //
+				&& ((JSONArray) atlasTypeDefinition.opt("enumTypes")).isEmpty() //
+				&& ((JSONArray) atlasTypeDefinition.opt("classTypes")).isEmpty() //
+				&& ((JSONArray) atlasTypeDefinition.opt("traitTypes")).isEmpty();
+	}
+
+	void checkUpdateForKnownType(JSONObject atlasTypeDefinition) {
+		JSONArray types = (JSONArray) atlasTypeDefinition.opt("classTypes");
+		for (Object o : types) {
+			JSONObject type = (JSONObject) o;
+			String typeName = (String) type.opt("typeName");
+			if ("ODFAnnotation".equals(typeName)) {
+				String msg = MessageFormat.format("Update of type ''{0}'' is not allowed", typeName);
+				throw new MetadataStoreException(msg);
+			}
+		}
+	}
+
+	public boolean createType(JSONObject atlasTypeDefinition) {
+		try {
+			logger.log(Level.FINE, "Creating types with definition: {0}", atlasTypeDefinition.write());
+			checkConnectivity();
+			boolean filterWasApplied = this.filterExistingTypes(atlasTypeDefinition, "classTypes");
+			filterWasApplied |= this.filterExistingTypes(atlasTypeDefinition, "structTypes");
+			String typesDef = atlasTypeDefinition.write();
+			if (filterWasApplied) {
+				logger.log(Level.FINE, "Modified type definitions after filtering exiting types: {0}", typesDef);
+			}
+			if (isInvalidTypeRequest(atlasTypeDefinition)) {
+				logger.log(Level.FINE, "No types left to be created after filtering, skipping");
+				return false;
+			}
+			Executor executor = this.restClient.getAuthenticatedExecutor();
+			Request createTypesRequest = Request.Put(this.url + ATLAS_API_INFIX + "/types");
+			createTypesRequest.bodyStream(new ByteArrayInputStream(typesDef.getBytes("UTF-8")), ContentType.APPLICATION_JSON);
+			HttpResponse httpResponse = executor.execute(createTypesRequest).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int statusCode = statusLine.getStatusCode();
+			if (statusCode != HttpStatus.SC_OK) {
+				throw new MetadataStoreException("An error occurred while creating ODF types in Atlas. Code: " + statusCode + ", reason: " + statusLine.getReasonPhrase());
+			}
+			logger.log(Level.FINE, "Types created. Original request: {0}", typesDef);
+		} catch (GeneralSecurityException | IOException | JSONException e) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", e);
+			throw new MetadataStoreException(e);
+		}
+		return true;
+
+	}
+
+	public JSONObject getAtlasTypeDefinition(String typeName) {
+		try {
+			checkConnectivity();
+			HttpResponse httpResponse = this.restClient.getAuthenticatedExecutor().execute(Request.Get(this.url + ATLAS_API_INFIX + "/types/" + typeName)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int statusCode = statusLine.getStatusCode();
+			if (statusCode == HttpStatus.SC_OK) {
+				InputStream is = httpResponse.getEntity().getContent();
+				JSONObject typeResp = new JSONObject(is);
+				is.close();
+				return typeResp;
+			}
+			return null;
+		} catch (GeneralSecurityException | IOException | JSONException e) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", e);
+			throw new MetadataStoreException(e);
+		}
+	}
+
+	@Override
+	public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
+		return WritableMetadataStoreUtils.getConnectionInfo(this, informationAsset);
+	}
+
+	@Override
+	public MetaDataObject retrieve(MetaDataObjectReference reference) {
+		checkConnectivity();
+		synchronized (updateLock) {
+			return this.retrieve(reference, 0);
+		}
+	}
+
+	MetaDataObject retrieve(MetaDataObjectReference reference, int level) {
+		JSONObject objectJson = retrieveAtlasEntityJson(reference);
+		if (objectJson == null) {
+			return null;
+		}
+		try {
+			MetaDataObject mdo = this.modelBridge.createMetaDataObjectFromAtlasEntity(objectJson, level);
+			return mdo;
+		} catch (JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	JSONObject retrieveAtlasEntityJson(MetaDataObjectReference reference) {
+		modelBridge.checkReference(reference);
+		String id = reference.getId();
+		try {
+			String resource = url + ATLAS_API_INFIX + "/entities/" + id;
+			HttpResponse httpResponse = this.restClient.getAuthenticatedExecutor().execute(Request.Get(resource)).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code == HttpStatus.SC_NOT_FOUND) {
+				return null;
+			}
+			if (code != HttpStatus.SC_OK) {
+				String msg = MessageFormat.format("Retrieval of object ''{0}'' failed: HTTP request status: ''{1}'', {2}",
+						new Object[] { id, statusLine.getStatusCode(), statusLine.getReasonPhrase() });
+				throw new MetadataStoreException(msg);
+			} else {
+				InputStream is = httpResponse.getEntity().getContent();
+				JSONObject jo = new JSONObject(is);
+				is.close();
+				return jo;
+			}
+		} catch (GeneralSecurityException | IOException | JSONException exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurred while connecting to Atlas", exc);
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	// TODO only helps in the single-server case.
+	// This is just a temporary workaround for the fact that Atlas does not update bidirectional
+	// references.
+	// TODO this currently prevents deadlocks from happening; needs to be reworked for the distributed case.
+	static Object updateLock = new Object();
+
+	private MetaDataObjectReference storeJSONObject(JSONObject jsonObject) {
+		logger.log(Level.FINEST, "Storing converted Atlas object: {0}.", JSONUtils.jsonObject4Log(jsonObject));
+		synchronized(updateLock) {
+			try {
+				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+				HttpResponse atlasResponse = restExecutor.execute( //
+						Request.Post(this.url + ATLAS_API_INFIX + "/entities") //
+								.bodyString(jsonObject.write(), ContentType.APPLICATION_JSON) //
+				).returnResponse();
+				InputStream is = atlasResponse.getEntity().getContent();
+				JSONObject atlasResult = new JSONObject(is);
+				is.close();
+				StatusLine line = atlasResponse.getStatusLine();
+				int statusCode = line.getStatusCode();
+				if (statusCode != HttpStatus.SC_CREATED) {
+					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
+					logger.log(Level.WARNING, "Atlas could not create object for request: {0}", jsonObject.write());
+					logger.log(Level.WARNING, "Atlas result for creating object: {0}", atlasResult.write());
+					throw new MetadataStoreException(
+							MessageFormat.format("Atlas REST call failed, return code: {0}, reason: {1}, details: {2}", new Object[] { statusCode, line.getReasonPhrase(), atlasResult.write() }));
+				}
+				logger.log(Level.FINEST, "Atlas response for storing object: {0}", JSONUtils.jsonObject4Log(atlasResult));
+				JSONArray ids = (JSONArray) ((JSONObject) atlasResult.get("entities")).get("created");
+				if (ids.size() != 1) {
+					String msg = "More than one (or no) Atlas entities have been created. Need a unique entity to be referenced by other objects.";
+					throw new MetadataStoreException(msg);
+				}
+				String newAnnotationId = (String) ids.get(0);
+				MetaDataObjectReference result = new MetaDataObjectReference();
+				result.setRepositoryId(getRepositoryId());
+				result.setId(newAnnotationId);
+				result.setUrl(getURL(newAnnotationId));
+				return result;
+			} catch (JSONException e) {
+				throw new MetadataStoreException(MessageFormat.format("Error converting JSON object ''{0}'' to string", JSONUtils.jsonObject4Log(jsonObject)), e);
+			} catch(IOException | GeneralSecurityException e2) {
+				throw new MetadataStoreException(MessageFormat.format("Error storing object ''{0}'' in Atlas", JSONUtils.jsonObject4Log(jsonObject)), e2);
+			}
+		}
+	}
+
+	private void updateJSONObject(JSONObject jsonObject, String id) {
+		logger.log(Level.FINEST, "Updating converted Atlas object: {0}.",JSONUtils.jsonObject4Log(jsonObject));
+		synchronized(updateLock) {
+			try {
+				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+				HttpResponse atlasResponse = restExecutor.execute( //
+						Request.Post(this.url + ATLAS_API_INFIX + "/entities/" + id) //
+								.bodyString(jsonObject.write(), ContentType.APPLICATION_JSON) //
+				).returnResponse();
+				InputStream is = atlasResponse.getEntity().getContent();
+				JSONObject atlasResult = new JSONObject(is);
+				is.close();
+				StatusLine line = atlasResponse.getStatusLine();
+				int statusCode = line.getStatusCode();
+				if (statusCode != HttpStatus.SC_OK) {
+					logger.log(Level.WARNING, "Atlas could not update object with request: {0}", jsonObject.write());
+					throw new MetadataStoreException(
+							MessageFormat.format("Atlas REST call failed, return code: {0}, reason: {1}, details: {2}", new Object[] { statusCode, line.getReasonPhrase(), atlasResult.write() }));
+				}
+				logger.log(Level.FINEST, "Atlas response for updating object: {0}", JSONUtils.jsonObject4Log(atlasResult));
+			} catch (JSONException e) {
+				throw new MetadataStoreException(MessageFormat.format("Error converting JSON object ''{0}'' to string", JSONUtils.jsonObject4Log(jsonObject)), e);
+			} catch(IOException | GeneralSecurityException e2) {
+				throw new MetadataStoreException(MessageFormat.format("Error storing object ''{0}'' in Atlas", JSONUtils.jsonObject4Log(jsonObject)), e2);
+			}
+		}
+	}
+
+	private MetaDataObjectReference store(Annotation annot) {
+		checkConnectivity();
+		synchronized (updateLock) {
+			try {
+				JSONObject annotationJSON = this.modelBridge.createAtlasEntityJSON(new StoredMetaDataObject(annot), new HashMap<String, String>(), new HashMap<String, MetaDataObjectReference>(), null);
+				MetaDataObjectReference newObjectRef = storeJSONObject(annotationJSON);
+
+				////////////////////////////////////////
+				// set the inverse reference explicitly; remove this once Atlas does it automatically
+
+				// first get full annotated object
+				String annotatedObjectId = AnnotationStoreUtils.getAnnotatedObject(annot).getId();
+				Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+				HttpResponse atlasResponse = restExecutor.execute(Request.Get(this.url + ATLAS_API_INFIX + "/entities/" + annotatedObjectId)).returnResponse();
+				StatusLine line = atlasResponse.getStatusLine();
+				int statusCode = line.getStatusCode();
+				if (statusCode != HttpStatus.SC_OK) {
+					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
+					logger.log(Level.WARNING, "Atlas could not retrieve annotated object: {0}", annotatedObjectId);
+					return null;
+				}
+
+				InputStream is = atlasResponse.getEntity().getContent();
+				JSONObject annotatedObject = new JSONObject(is).getJSONObject("definition");
+				is.close();
+				JSONObject annotatedObjectValues = ((JSONObject) annotatedObject.get("values"));
+				JSONArray annotations = (JSONArray) annotatedObjectValues.opt("annotations");
+
+				// add new "annotations" object to list
+				if (annotations == null) {
+					annotations = new JSONArray();
+					annotatedObjectValues.put("annotations", annotations);
+				}
+				JSONObject annotationRef = modelBridge.createAtlasObjectReference(newObjectRef.getId(), "ODFAnnotation");
+				annotations.add(annotationRef);
+
+				// now update
+				atlasResponse = restExecutor.execute(Request.Post(this.url + ATLAS_API_INFIX + "/entities/" + annotatedObjectId).bodyString(annotatedObject.write(), ContentType.APPLICATION_JSON))
+						.returnResponse();
+				line = atlasResponse.getStatusLine();
+				statusCode = line.getStatusCode();
+				if (statusCode != HttpStatus.SC_OK) {
+					logger.log(Level.SEVERE, "Atlas REST call failed, return code: {0}, reason: {1}", new Object[] { statusCode, line.getReasonPhrase() });
+					logger.log(Level.WARNING, "Atlas could not update annotated object: {0}", annotatedObjectId);
+					return null;
+				}
+
+				return newObjectRef;
+			} catch (MetadataStoreException e) {
+				throw e;
+			} catch (Exception e) {
+				throw new MetadataStoreException(e);
+			}
+		}
+	}
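+
+	// Note (illustrative): the read-modify-write of the annotated object's "annotations"
+	// array above is why store() synchronizes on updateLock; without it, two concurrent
+	// store() calls could read the same array and one inverse reference would be lost.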
+
+	private boolean deleteAcyclic(MetaDataObjectReference reference,
+			                      HashSet<MetaDataObjectReference> referencesProcessed) {
+		try {
+			List<Annotation> annotations = this.getAnnotations(reference, null);
+			if (annotations != null) {
+				for (Annotation annotation : annotations) {
+					if (referencesProcessed.contains(annotation.getReference())) {
+						throw new MetadataStoreException("Circular annotation definition found: " + annotation.getReference().getRepositoryId());
+					}
+					referencesProcessed.add(annotation.getReference());
+					deleteAcyclic(annotation.getReference(), referencesProcessed);
+				}
+			}
+			URIBuilder uri = new URIBuilder(url + ATLAS_API_INFIX + "/entities").addParameter("guid", reference.getId());
+			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+			HttpResponse httpResponse = restExecutor.execute(Request.Delete(uri.build())).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				throw new MetadataStoreException("Delete request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
+			}
+			InputStream is = httpResponse.getEntity().getContent();
+			JSONObject jo = new JSONObject(is);
+			is.close();
+			if (jo.containsKey("entities")) {
+				JSONObject entities = jo.getJSONObject("entities");
+				if (entities.containsKey("deleted")) {
+					JSONArray deleted = entities.getJSONArray("deleted");
+					return (deleted.size() == 1 && deleted.getString(0).equals(reference.getId()));
+				}
+			}
+			return false;
+		} catch(Exception exc) {
+			throw new MetadataStoreException(exc);
+		}
+	}
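+
+	// Illustrative summary: deleteAcyclic first deletes all annotations attached to the
+	// object, recursing while tracking visited references to detect circular annotation
+	// chains, then issues DELETE /entities?guid=<id> and reports success only if exactly
+	// this guid appears in the response's "entities.deleted" array.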
+
+	// TODO: Implement 'delete cascade'. Currently this only works for annotations but not for other types of object relationships
+
+	private boolean delete(MetaDataObjectReference reference) {
+		checkConnectivity();
+		return deleteAcyclic(reference, new HashSet<MetaDataObjectReference>());
+	}
+
+	@Override
+	public Properties getProperties() {
+		Properties props = new Properties();
+		props.put(STORE_PROPERTY_DESCRIPTION, MessageFormat.format("An Atlas metadata repository at ''{0}''", url));
+		props.put(STORE_PROPERTY_TYPE, "atlas");
+		props.put(STORE_PROPERTY_ID, this.storeId);
+		props.put("atlas.url", url);
+		return props;
+	}
+
+	/**
+	 * Returns a "human-readable" URL for this object, typically pointing to the Atlas UI.
+	 */
+	public String getURL(String guid) {
+		return url + "/#!/detailPage/" + guid;
+	}
+
+	public String getAtlasUrl() {
+		return this.url;
+	}
+
+	/**
+	 * The search query is passed to Atlas' generic search API; the query type
+	 * (Gremlin, DSL, or full-text) is selected under the covers.
+	 */
+	@Override
+	public List<MetaDataObjectReference> search(String query) {
+		checkConnectivity();
+		try {
+			URIBuilder uri = null;
+			HttpResponse httpResponse = null;
+			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+			if (query.startsWith("g.V")) {
+				uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search/gremlin").addParameter("query", query);
+				httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
+			} else {
+				uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search").addParameter("query", query);
+				httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
+			}
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				throw new MetadataStoreException("Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
+			}
+			InputStream is = httpResponse.getEntity().getContent();
+			JSONObject jo = new JSONObject(is);
+			is.close();
+			String querytype = (String) jo.get("queryType");
+
+			String repoId = getRepositoryId();
+			List<MetaDataObjectReference> resultMDORs = new ArrayList<>();
+			JSONArray resultList = (JSONArray) jo.get("results");
+			for (Object o : resultList) {
+				JSONObject result = (JSONObject) o;
+				String guid = null;
+				// get GUID differently depending on the query type
+				if ("gremlin".equals(querytype)) {
+					guid = (String) result.get("__guid");
+				} else if ("dsl".equals(querytype)) {
+					guid = (String) ((JSONObject) result.get("$id$")).get("id");
+				} else {
+					guid = (String) result.get("guid");
+				}
+				MetaDataObjectReference ref = new MetaDataObjectReference();
+				ref.setId(guid);
+				ref.setRepositoryId(repoId);
+				ref.setUrl(getURL(guid));
+				resultMDORs.add(ref);
+			}
+			return resultMDORs;
+		} catch (Exception exc) {
+			throw new MetadataStoreException(exc);
+		}
+
+	}
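+
+	// Illustrative query examples (hypothetical values): queries starting with "g.V" are
+	// routed to the Gremlin endpoint, everything else to the generic search resource:
+	//
+	//   search("g.V.has(\"__guid\", \"<guid>\").toList()");  // Gremlin
+	//   search("from DataFile where name = 'sample'");       // DSL via /discovery/search
+	//   search("sample");                                    // full-text via /discovery/search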
+
+	@Override
+	public String getRepositoryId() {
+		return this.storeId;
+	}
+
+	@Override
+	public MetadataStore.ConnectionStatus testConnection() {
+		return RESTMetadataStoreHelper.testConnectionForStaticURL(restClient, url);
+	}
+
+	// Make sure Atlas objects are deleted in a particular order according to foreign key relationships to prevent objects from becoming orphans
+	private static final String[] deletionSequence = new String[]{"Annotation", "BusinessTerm", "DataStore", "DataFileFolder", "DataSet" };
+
+	@Override
+	public void resetAllData() {
+		logger.info("Resetting all data on the metadata repository");
+		for (String typeToDelete:deletionSequence) {
+			List<MetaDataObjectReference> refs = this.search("from " + typeToDelete);
+			int i = 0;
+			for (MetaDataObjectReference ref : refs) {
+				try {
+					this.delete(ref);
+					i++;
+				} catch(Exception exc) {
+					logger.log(Level.WARNING, MessageFormat.format("Object ''{0}'' could not be deleted", ref.getId()), exc);
+				}
+			}
+			logger.info(i + " objects of type " + typeToDelete + " deleted.");
+		}
+	}
+
+	public Annotation retrieveAnnotation(MetaDataObjectReference annotationRef) {
+		MetaDataObject mdo = this.retrieve(annotationRef);
+		if (mdo instanceof Annotation) {
+			return (Annotation) mdo;
+		}
+		throw new MetadataStoreException(MessageFormat.format("Object with id ''{0}'' is not an annotation", annotationRef.getId()));
+	}
+
+
+
+	@SuppressWarnings("unchecked")
+	private List<JSONObject> runAnnotationQuery(String query) {
+		try {
+			List<JSONObject> results = new ArrayList<>();
+			Executor restExecutor = this.restClient.getAuthenticatedExecutor();
+			URIBuilder uri = new URIBuilder(url + ATLAS_API_INFIX + "/discovery/search/dsl").addParameter("query",
+					query);
+			HttpResponse httpResponse = restExecutor.execute(Request.Get(uri.build())).returnResponse();
+			StatusLine statusLine = httpResponse.getStatusLine();
+			int code = statusLine.getStatusCode();
+			if (code != HttpStatus.SC_OK) {
+				throw new MetadataStoreException(
+						"Search request failed: " + statusLine.getStatusCode() + ", " + statusLine.getReasonPhrase());
+			}
+			InputStream is = httpResponse.getEntity().getContent();
+			JSONObject jo = new JSONObject(is);
+			is.close();
+			results.addAll(jo.getJSONArray("results"));
+			return results;
+		} catch (Exception exc) {
+			throw new MetadataStoreException(exc);
+		}
+	}
+
+	private String combineToWhereClause(List<String> clauses) {
+		StringBuilder whereClause = null;
+		for (String clause : clauses) {
+			if (clause != null) {
+				if (whereClause == null) {
+					whereClause = new StringBuilder("where ");
+					whereClause.append(clause);
+				} else {
+					whereClause.append(" and ").append(clause);
+				}
+			}
+		}
+		if (whereClause == null) {
+			whereClause = new StringBuilder("");
+		}
+		return whereClause.toString();
+	}
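+
+	// Illustrative example (hypothetical clause values):
+	//   combineToWhereClause(Arrays.asList("t.analysisRun = 'req-1'", null))
+	// yields "where t.analysisRun = 'req-1'"; two non-null clauses are joined with " and ",
+	// and an all-null list yields "" so the caller's DSL query runs unfiltered.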
+
+	private List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId) {
+		checkConnectivity();
+
+		String profilingAnnotationObjectClause = null;
+		String classificationAnnotationObjectClause = null;
+		String analysisRequestClause = null;
+		if (object != null) {
+			profilingAnnotationObjectClause = "t.profiledObject.__guid = '" + object.getId() + "'";
+			classificationAnnotationObjectClause = "t.classifiedObject.__guid = '" + object.getId() + "'";
+		}
+		if (analysisRequestId != null) {
+			analysisRequestClause = "t.analysisRun = '" + analysisRequestId + "'";
+		}
+
+		List<JSONObject> queryResults = new ArrayList<>();
+		queryResults.addAll(runAnnotationQuery(
+				"from ProfilingAnnotation as t " + combineToWhereClause(Arrays.asList(new String[]{profilingAnnotationObjectClause, analysisRequestClause})) ));
+		queryResults.addAll(runAnnotationQuery(
+				"from ClassificationAnnotation as t " + combineToWhereClause(Arrays.asList(new String[]{classificationAnnotationObjectClause, analysisRequestClause})) ));
+		// TODO relationship annotation
+
+		try {
+			List<Annotation> results = new ArrayList<>();
+			for (JSONObject jo : queryResults) {
+				results.add((Annotation) this.modelBridge.createMetaDataObjectFromAtlasSearchResult(jo, 0));
+			}
+			return results;
+		} catch (Exception exc) {
+			throw new MetadataStoreException(exc);
+		}
+	}
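+
+	// Illustrative example (hypothetical ids): for an object with guid "abc" and analysis
+	// request id "req-1", the first query above becomes
+	//   from ProfilingAnnotation as t where t.profiledObject.__guid = 'abc' and t.analysisRun = 'req-1'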
+
+	@Override
+	public void createSampleData() {
+		logger.log(Level.INFO, "Creating sample data in metadata store.");
+		SampleDataHelper.copySampleFiles();
+		WritableMetadataStoreUtils.createSampleDataObjects(this);
+	}
+
+	@Override
+	public MetadataQueryBuilder newQueryBuilder() {
+		return new AtlasMetadataQueryBuilder();
+	}
+
+	public static void main(String[] args) {
+		try {
+			System.out.println("Creating Atlas sample data.");
+			AtlasMetadataStore mds = new AtlasMetadataStore();
+			mds.createSampleData();
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+	}
+
+	@Override
+	public AnnotationPropagator getAnnotationPropagator() {
+		return new AnnotationPropagator() {
+
+			@Override
+			public void propagateAnnotations(AnnotationStore as, String requestId) {
+				if (as instanceof AtlasMetadataStore) {
+					// do nothing, annotations already persisted
+					return;
+				}
+				// if this is another annotation store, simply store the annotations as-is
+				List<Annotation> annotations = as.getAnnotations(null, requestId);
+				for (Annotation annot : annotations) {
+					store(annot);
+				}
+			}
+		};
+	}
+
+	@Override
+	public void commit() {
+		checkConnectivity();
+		HashMap<String, StoredMetaDataObject> objectHashMap = new HashMap<String, StoredMetaDataObject>();
+		HashMap<String, String> typeMap = new HashMap<String, String>();
+		for (StoredMetaDataObject object : stagedObjects.values()) {
+			MetaDataObjectReference objRef = object.getMetaDataObject().getReference();
+			modelBridge.checkReference(objRef);
+			objectHashMap.put(objRef.getId(), object);
+			typeMap.put(objRef.getId(), object.getMetaDataObject().getClass().getSimpleName());
+		}
+
+		// Build an ordered list of all objects, starting with "root objects", i.e. objects that no other staged object references
+		List<StoredMetaDataObject> objectsToCreate = new ArrayList<StoredMetaDataObject>();
+		int numberOfObjectsToCreate;
+		do {
+			List<StoredMetaDataObject> rootObjectList = modelBridge.getRootObjects(objectHashMap);
+			numberOfObjectsToCreate = objectsToCreate.size();
+			objectsToCreate.addAll(rootObjectList);
+			for (StoredMetaDataObject rootObject : rootObjectList) {
+				objectHashMap.remove(rootObject.getMetaDataObject().getReference().getId());
+			}
+		} while((objectHashMap.size() > 0) && (objectsToCreate.size() > numberOfObjectsToCreate));
+
+		// Process object list in reverse order so that dependent objects are created first
+		HashMap<String, MetaDataObjectReference> referenceMap = new HashMap<String, MetaDataObjectReference>();
+		for (StoredMetaDataObject obj : Lists.reverse(objectsToCreate)) {
+			if (retrieve(obj.getMetaDataObject().getReference()) != null) {
+				// Update existing object
+				JSONObject originalAtlasJson = retrieveAtlasEntityJson(obj.getMetaDataObject().getReference());
+				JSONObject newObjectJSON = modelBridge.createAtlasEntityJSON(obj, typeMap, referenceMap, originalAtlasJson);
+				logger.log(Level.INFO, "Updating object of type ''{0}'' in metadata store: ''{1}''", new Object[] { obj.getClass().getName(), newObjectJSON });
+				updateJSONObject(newObjectJSON, obj.getMetaDataObject().getReference().getId());
+			} else {
+				// Create new object
+				JSONObject newObjectJSON = modelBridge.createAtlasEntityJSON(obj, typeMap, referenceMap, null);
+				logger.log(Level.INFO, "Storing new object of type ''{0}'' in metadata store: ''{1}''", new Object[] { obj.getClass().getName(), newObjectJSON });
+				referenceMap.put(obj.getMetaDataObject().getReference().getId(), storeJSONObject(newObjectJSON)); // Store new object id in reference map
+			}
+		}
+	}
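+
+	// Illustrative walk-through (hypothetical objects): staging a Database D referencing a
+	// Schema S which in turn references a Table T yields root objects [D], then [S], then
+	// [T] across the loop iterations above. Processing the reversed list creates T first,
+	// so its new Atlas id is already in referenceMap when S is created, and likewise for D.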
+
+	@Override
+	public MetaDataObject getParent(MetaDataObject metaDataObject) {
+		String queryString = "";
+		Class<? extends MetaDataObject> type = MetaDataObject.class;
+		String objectId = metaDataObject.getReference().getId();
+		if (metaDataObject instanceof Column) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__RelationalDataSet.columns\").toList()";
+			type = RelationalDataSet.class;
+		} else if (metaDataObject instanceof Connection) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataStore.connections\").toList()";
+			type = DataStore.class;
+		} else if (metaDataObject instanceof DataFileFolder) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataFileFolder.dataFileFolders\").toList()";
+			type = DataFileFolder.class;
+		} else if (metaDataObject instanceof DataFile) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__DataFileFolder.dataFiles\").toList()";
+			type = DataFileFolder.class;
+		} else if (metaDataObject instanceof Schema) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__Database.schemas\").toList()";
+			type = Database.class;
+		} else if (metaDataObject instanceof Table) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").in(\"__Schema.tables\").toList()";
+			type = Schema.class;
+		}
+		List<MetaDataObjectReference> parentList = search(queryString);
+		if (parentList.size() == 1) {
+			return InternalMetaDataUtils.getObjectList(this, parentList, type).get(0);
+		} else if (parentList.size() == 0) {
+			return null;
+		}
+		String errorMessage = MessageFormat.format("Inconsistent object reference: Metadata object with id ''{0}'' refers to more than one parent object.", metaDataObject.getReference().getId());
+		throw new MetadataStoreException(errorMessage);
+	}
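+
+	// Illustrative example (hypothetical guid): for a Column with guid "abc", the parent
+	// lookup issues the Gremlin query
+	//   g.V.has("__guid", "abc").in("__RelationalDataSet.columns").toList()
+	// i.e. it traverses the owning RelationalDataSet's "columns" edge in reverse.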
+
+	protected <T> List<T> getReferences(String attributeName, MetaDataObject metaDataObject, Class<T> type) {
+		String queryString = "";
+		String objectId = metaDataObject.getReference().getId();
+		if (MetadataStoreBase.ODF_COLUMNS_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__RelationalDataSet.columns\").toList()";
+		} else if (MetadataStoreBase.ODF_CONNECTIONS_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataStore.connections\").toList()";
+		} else if (MetadataStoreBase.ODF_DATAFILEFOLDERS_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataFileFolder.dataFileFolders\").toList()";
+		} else if (MetadataStoreBase.ODF_DATAFILES_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__DataFileFolder.dataFiles\").toList()";
+		} else if (MetadataStoreBase.ODF_SCHEMAS_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__Database.schemas\").toList()";
+		} else if (MetadataStoreBase.ODF_TABLES_REFERENCE.equals(attributeName)) {
+			queryString = "g.V.has(\"__guid\", \"" + objectId + "\").out(\"__Schema.tables\").toList()";
+		}
+		return InternalMetaDataUtils.getObjectList(this, search(queryString), type);
+	}
+
+}
diff --git a/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java
new file mode 100755
index 0000000..d06d8b5
--- /dev/null
+++ b/odf/odf-atlas/src/main/java/org/apache/atlas/odf/core/metadata/atlas/AtlasModelBridge.java
@@ -0,0 +1,409 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata.atlas;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Field;
+import java.lang.reflect.ParameterizedType;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONArray;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreBase;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+import org.apache.atlas.odf.api.metadata.UnknownMetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+
+/**
+ * This class converts ODF objects to Atlas objects / REST API requests
+ * and vice versa.
+ */
+public class AtlasModelBridge {
+	Logger logger = Logger.getLogger(AtlasModelBridge.class.getName());
+	MetadataStore mds;
+
+    private static final HashMap<String, String> referenceNameMap = new HashMap<String, String>();
+    static {
+        referenceNameMap.put(MetadataStoreBase.ODF_COLUMNS_REFERENCE, "columns");
+        referenceNameMap.put(MetadataStoreBase.ODF_CONNECTIONS_REFERENCE, "connections");
+        referenceNameMap.put(MetadataStoreBase.ODF_DATAFILEFOLDERS_REFERENCE, "dataFileFolders");
+        referenceNameMap.put(MetadataStoreBase.ODF_DATAFILES_REFERENCE, "dataFiles");
+        referenceNameMap.put(MetadataStoreBase.ODF_SCHEMAS_REFERENCE, "schemas");
+        referenceNameMap.put(MetadataStoreBase.ODF_TABLES_REFERENCE, "tables");
+    }
+
+	public AtlasModelBridge(MetadataStore mds) {
+		this.mds = mds;
+	}
+
+	static ODFSettings getODFConfig() {
+		ODFSettings odfconf = new ODFFactory().create().getSettingsManager().getODFSettings();
+		return odfconf;
+	}
+
+	private boolean isAtlasType(Object atlasJson, String className) {
+		if ((atlasJson instanceof JSONObject) && ((JSONObject) atlasJson).containsKey("jsonClass")) {
+			Object jsonClass = ((JSONObject) atlasJson).opt("jsonClass");
+			if (jsonClass instanceof String) {
+				return jsonClass.toString().equals(className);
+			}
+		}
+		return false;
+	}
+
+	private Object convertAtlasJsonToODF(Object atlasJson, int level) throws JSONException {
+		Object resultObj = atlasJson;
+		if (atlasJson instanceof JSONObject) {
+			JSONObject valJson = (JSONObject) atlasJson;
+			if (isAtlasType(valJson, "org.apache.atlas.typesystem.json.InstanceSerialization$_Id")) {
+				// JSON object is reference to other object
+				String id = (String) valJson.get("id");
+				resultObj = createODFReferenceJSON(level, id);
+			} else if ("org.apache.atlas.typesystem.json.InstanceSerialization$_Reference".equals(valJson.opt("jsonClass"))) {
+				// treat References the same as IDs
+				JSONObject idObj = (JSONObject) valJson.get("id");
+				String id = (String) idObj.get("id");
+				resultObj = createODFReferenceJSON(level, id);
+			} else if (valJson.opt("$typeName$") != null && (valJson.opt("id") instanceof String)) {
+				// this only happens if the object was retrieved via the /discovery/search resource and not through /entities
+				resultObj = createODFReferenceJSON(level, valJson.getString("id"));
+			} else {
+				JSONObject convertedJSONObject = new JSONObject();
+				// always remove annotations property as it is no longer part of MetaDataObject
+				valJson.remove("annotations");
+
+				// Remove references to other objects because they are not attributes of the corresponding metadata objects
+				for (String referenceName : referenceNameMap.values()) {
+					valJson.remove(referenceName);
+				}
+
+				for (Object key : valJson.keySet()) {
+					Object value = valJson.get(key);
+					convertedJSONObject.put(key, convertAtlasJsonToODF(value, level + 1));
+				}
+				if (isAtlasType(convertedJSONObject, "org.apache.atlas.typesystem.json.InstanceSerialization$_Struct") && (convertedJSONObject.containsKey("values"))) {
+					// Remove Atlas struct object
+					convertedJSONObject = (JSONObject) convertedJSONObject.get("values");
+				}
+				resultObj = convertedJSONObject;
+			}
+		} else if (atlasJson instanceof JSONArray) {
+			JSONArray arr = (JSONArray) atlasJson;
+			JSONArray convertedArray = new JSONArray();
+			for (Object o : arr) {
+				// don't increase level if traversing an array
+				convertedArray.add(convertAtlasJsonToODF(o, level));
+			}
+			resultObj = convertedArray;
+		}
+		return resultObj;
+	}
+
+
+	private JSONObject createODFReferenceJSON(int level, String id) throws JSONException {
+		JSONObject mdoref = new JSONObject();
+		mdoref.put("id", id);
+		mdoref.put("repositoryId", this.mds.getRepositoryId());
+		mdoref.put("url", (String) this.mds.getProperties().get("atlas.url"));
+		return mdoref;
+	}
+
+	public MetaDataObject createMetaDataObjectFromAtlasSearchResult(JSONObject json, int level) throws JSONException {
+		String guid = (String) ((JSONObject) json.get("$id$")).get("id");
+		String typeName = json.getString("$typeName$");
+		json.remove("$id$");
+		json.remove("$typeName$");
+		MetaDataObject mdo = createMDOSkeletonForType(level, json, typeName);
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId(guid);
+		ref.setRepositoryId(this.mds.getRepositoryId());
+		ref.setUrl((String) this.mds.getProperties().get("atlas.url"));
+		mdo.setReference(ref);
+		return mdo;
+	}
+
+	public MetaDataObject createMetaDataObjectFromAtlasEntity(JSONObject json, int level) throws JSONException {
+		String guid = (String) (((JSONObject) ((JSONObject) json.get("definition")).get("id")).get("id"));
+		MetaDataObject mdo = createMDOSkeleton(json, level);
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId(guid);
+		ref.setRepositoryId(this.mds.getRepositoryId());
+		ref.setUrl((String) this.mds.getProperties().get("atlas.url"));
+		mdo.setReference(ref);
+		return mdo;
+	}
+
+	private MetaDataObject createMDOSkeleton(JSONObject json, int level) {
+		try {
+			JSONObject def = (JSONObject) json.get("definition");
+			if (def != null) {
+				JSONObject values = (JSONObject) def.get("values");
+				if (values != null) {
+					String typeName = (String) def.get("typeName");
+					if (typeName != null) {
+						return createMDOSkeletonForType(level, values, typeName);
+					}
+				}
+			}
+		} catch (Exception exc) {
+			// interpret all exceptions as "incorrect format"
+			String msg = "Conversion of JSON to metadata object failed, using default";
+			logger.log(Level.WARNING, msg, exc);
+		}
+		// fallback, create generic MDO
+		return new UnknownMetaDataObject();
+	}
+
+
+	private MetaDataObject createMDOSkeletonForType(int level, JSONObject values, String typeName)
+			throws JSONException {
+		MetaDataObject result = new UnknownMetaDataObject(); // Unknown by default
+		Class<?> cl;
+		//TODO: Move MetaDataObject.java into models package and use this instead of DataSet
+		String fullClassName = DataSet.class.getPackage().getName() + "." + typeName;
+		try {
+			cl = Class.forName(fullClassName);
+		} catch (ClassNotFoundException e) {
+			String messageText = MessageFormat.format("Cannot find class ''{0}''.", fullClassName);
+			throw new MetadataStoreException(messageText, e);
+		}
+		if (cl != null) {
+			JSONObject modifiedValues = (JSONObject) this.convertAtlasJsonToODF(values, level);
+			if (typeName.equals("ProfilingAnnotation") || typeName.equals("ClassificationAnnotation") || typeName.equals("RelationshipAnnotation")) {
+				result = (MetaDataObject) JSONUtils.fromJSON(modifiedValues.write(), Annotation.class);
+			} else {
+				modifiedValues.put("javaClass", cl.getName());
+				result = (MetaDataObject) JSONUtils.fromJSON(modifiedValues.write(), cl);
+			}
+		}
+		return result;
+	}
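+
+	// Illustrative note: the Atlas type name is mapped to an ODF model class by prefixing
+	// the package of DataSet, e.g. typeName "Table" resolves to
+	// org.apache.atlas.odf.api.metadata.models.Table. The annotation type names are
+	// special-cased because they all deserialize through the generic Annotation class.
+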
+	@SuppressWarnings("rawtypes")
+	public JSONObject createAtlasEntityJSON(StoredMetaDataObject storedObject, HashMap<String, String> typeMap, HashMap<String, MetaDataObjectReference> referenceMap, JSONObject originalAtlasJson) {
+		JSONObject objectJson = null;
+		MetaDataObject object = storedObject.getMetaDataObject();
+		try {
+			logger.log(Level.FINE, "Storing instance of " + object.getClass().getName());
+			JSONObject valuesJSON = JSONUtils.toJSONObject(object); // Initialize value JSON with attributes from MetaDataObject
+			valuesJSON.remove("reference"); // Remove object reference because it must not be stored in Atlas
+			Class<?> cl = object.getClass();
+			while (cl != MetaDataObject.class) {  // process class hierarchy up to but excluding MetaDataObject
+				Field fields[] = cl.getDeclaredFields();
+				for (Field f: fields) {
+					f.setAccessible(true);
+					try {
+						Class<?> fieldClass = f.getType();
+						Object fieldObject = f.get(object);
+						if (fieldObject != null) {
+							String fieldName = f.getName();
+							if (fieldClass.getName().equals(List.class.getName())) {
+								// Process reference lists which are stored in attributes of the actual MetaDataObject, e.g. for Annotations
+						        ParameterizedType stringListType = (ParameterizedType) f.getGenericType();
+						        if (!((List) fieldObject).isEmpty()) {
+							        Class<?> listElementClass = (Class<?>) stringListType.getActualTypeArguments()[0];
+							        if (listElementClass.equals(MetaDataObjectReference.class)) {
+										JSONArray referenceArray = new JSONArray();
+										@SuppressWarnings("unchecked")
+										List<MetaDataObjectReference> members = (List<MetaDataObjectReference>) fieldObject;
+										for (MetaDataObjectReference mdor : members) {
+											String referenceId = ((MetaDataObjectReference) mdor).getId();
+											if (referenceMap.containsKey(referenceId)) {
+												referenceArray.add(createAnnotatedObjectReference(referenceMap.get(referenceId),typeMap.get(referenceId)));
+											} else {
+												referenceArray.add(createAnnotatedObjectReference(mdor, mds.retrieve(mdor).getClass().getSimpleName()));
+											}
+										}
+										valuesJSON.put(fieldName, referenceArray);
+							        }
+						        }
+							} else if (fieldClass == MetaDataObjectReference.class) {
+								// Process individual references which are stored in attributes of the actual MetaDataObject, e.g. for Annotations
+								String referenceId = ((MetaDataObjectReference) fieldObject).getId();
+								if (referenceMap.containsKey(referenceId)) {
+									valuesJSON.put(fieldName, createAnnotatedObjectReference(referenceMap.get(referenceId), "MetaDataObject"));
+								} else {
+									valuesJSON.put(fieldName, createAnnotatedObjectReference((MetaDataObjectReference) fieldObject, "MetaDataObject"));
+								}
+							} else {
+								valuesJSON.put(fieldName, fieldObject);
+							}
+						}
+					} catch (IllegalAccessException e) {
+						throw new IOException(e);
+					}
+				}
+				cl = cl.getSuperclass();
+			}
+
+			// Store references to other objects which are not attributes of the MetaDataObject
+			for(String referenceType : mds.getReferenceTypes()) {
+				String atlasReferenceName = referenceNameMap.get(referenceType);
+				// Add references of original Atlas object
+				JSONArray referenceArray = new JSONArray();
+				if ((originalAtlasJson != null) && (originalAtlasJson.get("definition") != null)) {
+					JSONObject values = originalAtlasJson.getJSONObject("definition").getJSONObject("values");
+					if ((values != null) && (values.containsKey(atlasReferenceName))) {
+						if (values.get(atlasReferenceName) instanceof JSONArray) {
+							referenceArray = values.getJSONArray(atlasReferenceName);
+						}
+					}
+				}
+				if (storedObject.getReferenceMap().containsKey(referenceType)) {
+					// Add new references for the reference type
+					for (MetaDataObjectReference mdor : storedObject.getReferenceMap().get(referenceType)) {
+						String referenceId = ((MetaDataObjectReference) mdor).getId();
+						if (referenceMap.containsKey(referenceId)) {
+							referenceArray.add(createAnnotatedObjectReference(referenceMap.get(referenceId),typeMap.get(referenceId)));
+						} else {
+							referenceArray.add(createAnnotatedObjectReference(mdor, mds.retrieve(mdor).getClass().getSimpleName()));
+						}
+					}
+				}
+				if (referenceArray.size() > 0) {
+					valuesJSON.put(atlasReferenceName, referenceArray);
+				}
+			}
+
+			String objectType;
+			if (object instanceof Annotation) {
+				objectType = (object instanceof ProfilingAnnotation) ? "ProfilingAnnotation" :
+					(object instanceof ClassificationAnnotation) ? "ClassificationAnnotation" :
+					"RelationshipAnnotation";
+			} else {
+				objectType = object.getClass().getSimpleName();
+			}
+			if (originalAtlasJson != null) {
+				// When updating an existing object, the entity skeleton must point to the correct object id in Atlas
+				objectJson = this.createAtlasEntitySkeleton(objectType, object.getReference().getId());
+			} else {
+				// For new objects, a generic id is used
+				objectJson = this.createAtlasEntitySkeleton(objectType, null);
+			}
+			objectJson.put("values", valuesJSON);
+		} catch (IOException | JSONException exc) {
+			throw new MetadataStoreException(exc);
+		}
+		return objectJson;
+	}
+
+	/**
+	 * Create an empty Atlas object of a certain type with a certain guid.
+	 * Can be used in entity POST requests for creating or (partially) updating entities.
+	 */
+	private JSONObject createAtlasEntitySkeleton(String typeName, String guid) {
+		try {
+			JSONObject obj = null;
+			obj = new JSONObject(this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json"));
+			obj.put("typeName", typeName);
+			JSONObject id = (JSONObject) obj.get("id");
+			id.put("typeName", typeName);
+			if (guid != null) {
+				id.put("id", guid);
+			}
+			return obj;
+		} catch (JSONException exc) {
+			throw new MetadataStoreException(exc);
+		}
+	}
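+
+	// Minimal usage sketch (illustrative, hypothetical values):
+	//
+	//   JSONObject entity = createAtlasEntitySkeleton("Table", null); // null guid => new object
+	//   entity.put("values", valuesJSON);
+	//   // the resulting JSON can then be POSTed to the Atlas entities resource,
+	//   // as done by storeJSONObject() in AtlasMetadataStore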
+
+	/**
+	 * check if the reference belongs to this repository. Throw exception if not.
+	 */
+	void checkReference(MetaDataObjectReference reference) {
+		if (reference == null) {
+			throw new MetadataStoreException("Reference cannot be null");
+		}
+		if ((reference.getRepositoryId() != null) && !reference.getRepositoryId().equals(mds.getRepositoryId())) {
+			throw new MetadataStoreException(
+					MessageFormat.format("Repository ID ''{0}'' of reference does not match the one of this repository ''{1}''", new Object[] { reference.getRepositoryId(), mds.getRepositoryId() }));
+		}
+	}
+
+	/**
+	 * create an Atlas object reference that can be used whenever Atlas uses references in JSON requests
+	 */
+	public JSONObject createAtlasObjectReference(String guid, String typeName) {
+		JSONObject ref;
+		try {
+			InputStream is = this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json");
+			ref = new JSONObject(is);
+			is.close();
+			ref.put("id", guid);
+			ref.put("typeName", typeName);
+		} catch (IOException | JSONException e) {
+			// should never happen since the template is a bundled resource
+			throw new RuntimeException(e);
+		}
+		return ref;
+	}
+
+	public JSONObject createAnnotatedObjectReference(MetaDataObjectReference annotatedObjectRef, String typeName) {
+		this.checkReference(annotatedObjectRef);
+		String annotatedObjectId = annotatedObjectRef.getId();
+		return this.createAtlasObjectReference(annotatedObjectId, typeName);
+	}
+
+	public List<StoredMetaDataObject> getRootObjects(HashMap<String, StoredMetaDataObject> objectHashMap) {
+		List<StoredMetaDataObject> rootObjectList = new ArrayList<StoredMetaDataObject>();
+		for (StoredMetaDataObject object : objectHashMap.values()) {
+			if (isRootObject(object, objectHashMap)) {
+				rootObjectList.add(object);
+			}
+		}
+		return rootObjectList;
+	}
+
+	private boolean isRootObject(StoredMetaDataObject object, HashMap<String, StoredMetaDataObject> objectHashMap) {
+		String objectId = object.getMetaDataObject().getReference().getId();
+		try {
+			for (StoredMetaDataObject currentObject : objectHashMap.values()) {
+				String currentObjectId = currentObject.getMetaDataObject().getReference().getId();
+				if (!currentObjectId.equals(objectId)) {
+					// If it is not the object itself, check whether the current object contains a reference to the object
+					if (JSONUtils.toJSON(currentObject).contains(objectId)) {
+						// If it does, it cannot be a root object
+						return false;
+					}
+				}
+			}
+			return true;
+		} catch (JSONException e) {
+			throw new MetadataStoreException(MessageFormat.format("Error converting object of class ''{0}'' to JSON string", object.getClass().getName()), e);
+		}
+	}
+
+}
diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
new file mode 100755
index 0000000..f2630f6
--- /dev/null
+++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
@@ -0,0 +1,444 @@
+{
+	"enumTypes": [],
+	"structTypes": [],
+	"traitTypes": [],
+	"classTypes": [
+		{
+			"superTypes": [],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "MetaDataObject",
+			"typeDescription": "The base open metadata object.",
+			"attributeDefinitions": [
+				{
+					"name": "name",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "javaClass",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "description",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "annotations",
+					"dataTypeName": "array<Annotation>",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "originRef",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "replicaRefs",
+					"dataTypeName": "array<string>",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "DataStore",
+			"typeDescription": "The base for all kinds of data stores.",
+			"attributeDefinitions": [
+				{
+					"name": "connections",
+					"dataTypeName": "array<Connection>",
+					"multiplicity": "optional",
+					"isComposite": true,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Connection",
+			"typeDescription": "The base for all kinds of connections.",
+			"attributeDefinitions": []
+		},
+		{
+			"superTypes": ["DataStore"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Database",
+			"typeDescription": "A relational database.",
+			"attributeDefinitions":
+			[
+				{
+					"name": "dbType",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "schemas",
+					"dataTypeName": "array<Schema>",
+					"multiplicity": "optional",
+					"isComposite": true,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		{
+			"superTypes": ["Connection"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "JDBCConnection",
+			"typeDescription": "A JDBC connection.",
+			"attributeDefinitions": [
+				{
+					"name": "jdbcConnectionString",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "user",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "password",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+
+			]
+		},
+		
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "DataFileFolder",
+			"typeDescription": "A folder containing data files or other folders.",
+			"attributeDefinitions": [
+				{
+					"name": "dataFileFolders",
+					"dataTypeName": "array<DataFileFolder>",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "dataFiles",
+					"dataTypeName": "array<DataFile>",
+					"multiplicity": "optional",
+					"isComposite": true,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "DataSet",
+			"typeDescription": "The base for all kinds of data sets (tables, files, etc.).",
+			"attributeDefinitions": [
+			]
+		},
+
+		{
+			"superTypes": ["DataSet"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "RelationalDataSet",
+			"typeDescription": "The base of a relational data set.",
+			"attributeDefinitions": [
+				{
+					"name": "columns",
+					"dataTypeName": "array<Column>",
+					"multiplicity": "optional",
+					"isComposite": true,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Column",
+			"typeDescription": "A relational column.",
+			"attributeDefinitions": [
+				{
+					"name": "dataType",
+					"dataTypeName": "string",
+					"multiplicity": "required",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+
+        {
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Schema",
+			"typeDescription": "The schema of a relational database.",
+			"attributeDefinitions": [
+				{
+					"name": "tables",
+					"dataTypeName": "array<Table>",
+					"multiplicity": "optional",
+					"isComposite": true,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+				
+		{
+			"superTypes": ["RelationalDataSet"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Table",
+			"typeDescription": "A relational table.",
+			"attributeDefinitions": [
+			]
+		},
+
+		{
+			"superTypes": ["RelationalDataSet"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "DataFile",
+			"typeDescription": "A file containing relational data.",
+			"attributeDefinitions": [
+				{
+					"name": "urlString",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		{
+			"superTypes": ["DataSet"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Document",
+			"typeDescription": "An unstructured document.",
+			"attributeDefinitions": [
+				{
+					"name": "urlString",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "encoding",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "Annotation",
+			"typeDescription": "The base for all annotations created through the Open Discovery Framework.",
+			"attributeDefinitions": [
+				{
+					"name": "annotationType",
+					"dataTypeName": "string",
+					"multiplicity": "required",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "analysisRun",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "jsonProperties",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "summary",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		
+		{
+			"superTypes": ["Annotation"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "ProfilingAnnotation",
+			"typeDescription": "The base for all annotations carrying profile attributes of an object.",
+			"attributeDefinitions": [
+				{
+					"name": "profiledObject",
+					"dataTypeName": "MetaDataObject",
+					"multiplicity": "required",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		
+		{
+			"superTypes": ["Annotation"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "ClassificationAnnotation",
+			"typeDescription": "The base for all annotations assigning an object to another object.",
+			"attributeDefinitions": [
+				{
+					"name": "classifiedObject",
+					"dataTypeName": "MetaDataObject",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "classifyingObjects",
+					"dataTypeName": "array<MetaDataObject>",
+					"multiplicity": "collection",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+		
+		{
+			"superTypes": ["Annotation"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "RelationshipAnnotation",
+			"typeDescription": "The base for all annotations expressing a relationship between objects.",
+			"attributeDefinitions": [
+				{
+					"name": "relatedObjects",
+					"dataTypeName": "array<MetaDataObject>",
+					"multiplicity": "collection",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		},
+
+		{
+			"superTypes": ["MetaDataObject"],
+			"hierarchicalMetaTypeName": "org.apache.atlas.typesystem.types.ClassType",
+			"typeName": "BusinessTerm",
+			"typeDescription": "A business term of the glossary.",
+			"attributeDefinitions": [
+				{
+					"name": "abbreviations",
+					"dataTypeName": "array<string>",
+					"multiplicity": "collection",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "example",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				},
+				{
+					"name": "usage",
+					"dataTypeName": "string",
+					"multiplicity": "optional",
+					"isComposite": false,
+					"isUnique": false,
+					"isIndexable": true,
+					"reverseAttributeName": null
+				}
+			]
+		}
+
+	]
+}
diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json
new file mode 100755
index 0000000..ec546e7
--- /dev/null
+++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-reference.json
@@ -0,0 +1,16 @@
+{
+	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Reference",
+	"id": {
+		"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Reference",
+		"id": "-1",
+		"version": 0,
+		"typeName": "ODFAnnotation"
+	},
+	"typeName": "ODFAnnotation",
+	"values": {
+		
+	},
+	"traitNames": [],
+	"traits": {
+	}
+}
diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json
new file mode 100755
index 0000000..99ad73c
--- /dev/null
+++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-object-template.json
@@ -0,0 +1,16 @@
+{
+	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Reference",
+	"id": {
+		"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Id",
+		"id": "-5445763795823115",
+		"version": 0,
+		"typeName": "ODFAnnotation"
+	},
+	"typeName": "ODFAnnotation",
+	"values": {
+		
+	},
+	"traitNames": [],
+	"traits": {
+	}
+}
diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json
new file mode 100755
index 0000000..8514fc6
--- /dev/null
+++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-reference-template.json
@@ -0,0 +1,6 @@
+{
+	"jsonClass": "org.apache.atlas.typesystem.json.InstanceSerialization$_Id",
+	"id": "-5445763795823115",
+	"version": 0,
+	"typeName": "TYPE_NAME"
+}
diff --git a/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..8587cc8
--- /dev/null
+++ b/odf/odf-atlas/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,15 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Override the default MetadataStore implementation
+org.apache.atlas.odf.api.metadata.MetadataStore=org.apache.atlas.odf.core.metadata.atlas.AtlasMetadataStore
diff --git a/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java b/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java
new file mode 100755
index 0000000..1458cb3
--- /dev/null
+++ b/odf/odf-atlas/src/test/java/org/apache/atlas/odf/core/runtime/ODFFactoryClassesNoMockTest.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.runtime;
+
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.apache.atlas.odf.core.test.messaging.MockQueueManager;
+import org.apache.atlas.odf.core.test.spark.MockSparkServiceExecutor;
+import org.apache.atlas.odf.core.test.store.MockConfigurationStorage;
+
+public class ODFFactoryClassesNoMockTest extends ODFTestcase {
+
+	Logger logger = Logger.getLogger(ODFFactoryClassesNoMockTest.class.getName());
+
+	<T> void testFactoryDoesNotCreateInstanceOf(Class<T> interfaceClass, Class<? extends T> mockClass) {
+		ODFInternalFactory f = new ODFInternalFactory();
+		logger.info("Testing mock class for interface: " + interfaceClass.getName());
+		T obj = f.create(interfaceClass);
+		logger.info("Factory created object of type " + obj.getClass().getName());
+		Assert.assertFalse(mockClass.isInstance(obj));
+	}
+
+	@Test
+	public void testNoMockClasses() {
+		logger.info("Testing that no mock classes are used");
+
+		testFactoryDoesNotCreateInstanceOf(ODFConfigurationStorage.class, MockConfigurationStorage.class);
+		testFactoryDoesNotCreateInstanceOf(DiscoveryServiceQueueManager.class, MockQueueManager.class);
+		testFactoryDoesNotCreateInstanceOf(SparkServiceExecutor.class, MockSparkServiceExecutor.class);
+	}
+}
diff --git a/odf/odf-core/.gitignore b/odf/odf-core/.gitignore
new file mode 100755
index 0000000..94858e5
--- /dev/null
+++ b/odf/odf-core/.gitignore
@@ -0,0 +1,19 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+derby.log
diff --git a/odf/odf-core/pom.xml b/odf/odf-core/pom.xml
new file mode 100755
index 0000000..00f5cdb
--- /dev/null
+++ b/odf/odf-core/pom.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns:if="ant:if">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-core</artifactId>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.wink</groupId>
+			<artifactId>wink-json4j</artifactId>
+			<version>1.4</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.httpcomponents</groupId>
+			<artifactId>fluent-hc</artifactId>
+			<version>4.5.1</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.derby</groupId>
+			<artifactId>derby</artifactId>
+			<version>10.12.1.1</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.jasypt</groupId>
+			<artifactId>jasypt</artifactId>
+			<version>1.9.2</version>
+		</dependency>
+		<dependency>
+			<groupId>io.swagger</groupId>
+			<artifactId>swagger-jaxrs</artifactId>
+			<version>1.5.9</version>
+			<scope>compile</scope>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+					</systemPropertyVariables>
+					<includes>
+						<include>**/configuration/**</include>
+						<!-- All other odf-core unit tests are executed in the odf-messaging project -->
+						<!-- Add individual test here to run them with the MockQueueManager rather than with Kafka -->
+					</includes>
+					<excludes>
+						<exclude>**/integrationtest/**</exclude>
+					</excludes>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-jar-plugin</artifactId>
+				<version>2.6</version>
+				<executions>
+					<execution>
+						<goals>
+							<goal>test-jar</goal>
+						</goals>
+						<configuration>
+							<!-- Remove the implementations properties file from the test jar -->
+							<excludes>
+								<exclude>org/apache/atlas/odf/odf-implementation.properties</exclude>
+							</excludes>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
+</project>
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java
new file mode 100755
index 0000000..ffd2ba9
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Encryption.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import org.jasypt.exceptions.EncryptionOperationNotPossibleException;
+import org.jasypt.util.text.BasicTextEncryptor;
+import org.apache.commons.codec.binary.Base64;
+
+public class Encryption {
+
+	//TODO Store this password at a secure location provided by the surrounding platform.
+	private static final String b64EncryptionPassword = "eGg1NyQyMyUtIXFQbHoxOHNIdkM=";
+
+	public static String encryptText(String plainText) {
+		if ((plainText != null) && (!plainText.isEmpty())) {
+			BasicTextEncryptor textEncryptor = new BasicTextEncryptor();
+			byte[] plainEncryptionPassword = Base64.decodeBase64(b64EncryptionPassword);
+			textEncryptor.setPassword(new String(plainEncryptionPassword));
+			return textEncryptor.encrypt(plainText);
+		} else {
+			return plainText;
+		}
+	}
+
+	public static String decryptText(String encryptedText) {
+		if ((encryptedText != null) && (!encryptedText.isEmpty())) {
+			BasicTextEncryptor textEncryptor = new BasicTextEncryptor();
+			byte[] plainEncryptionPassword = Base64.decodeBase64(b64EncryptionPassword);
+			textEncryptor.setPassword(new String(plainEncryptionPassword));
+			return textEncryptor.decrypt(encryptedText);
+		} else {
+			return encryptedText;
+		}
+	}
+	
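+	/**
+	 * Heuristic check: decrypting arbitrary plain text typically fails with an
+	 * EncryptionOperationNotPossibleException, so a successful decryption is taken as
+	 * evidence that the text was produced by encryptText(). Note that null and empty
+	 * strings are passed through by decryptText() and therefore report true.
+	 */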
+	public static boolean isEncrypted(String text) {
+		try {
+			decryptText(text);
+		} catch(EncryptionOperationNotPossibleException exc) {
+			return false;
+		}
+		return true;
+	}
+
+	/*
+	// Uncomment and use the following code for encrypting passwords to be stored in the odf-initial-configuration.json file.
+	public static void main(String[] args) {
+		if (args.length != 1)  {
+			System.out.println("usage: java Encryption <plain password>");
+		} else {
+			System.out.println("Encrypted password: " + encryptText(args[0]));
+		}
+	}
+	 */
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java
new file mode 100755
index 0000000..313b25d
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Environment.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+
+public interface Environment {
+	
+	String getZookeeperConnectString();
+	
+	String getProperty(String propertyName);
+	
+	Map<String, String> getPropertiesWithPrefix(String prefix);
+	
+	String getCurrentUser();
+	
+	ConfigContainer getDefaultConfiguration();
+	
+	/**
+	 * Returns the names of the runtimes active in this environment.
+	 * Return null to indicate that all available runtimes should be active.
+	 */
+	List<String> getActiveRuntimeNames();
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java
new file mode 100755
index 0000000..4021049
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFImplementations.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
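+/**
+ * Loads interface-to-implementation mappings from properties files found on the classpath
+ * under the given path. Each entry maps an interface name to an implementing class, e.g.
+ * (hypothetical entry):
+ *
+ *   org.apache.atlas.odf.core.Environment=org.apache.atlas.odf.core.StandaloneEnvironment
+ *
+ * If the same interface is mapped more than once, the implementation lowest in the class
+ * hierarchy wins; for unrelated duplicates the first mapping found is kept.
+ */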
+public class ODFImplementations {
+
+	Logger logger = Logger.getLogger(ODFImplementations.class.getName());
+
+	private Map<String, String> implementations = new HashMap<String, String>();
+
+	public ODFImplementations(String path, ClassLoader cl) {
+		Enumeration<URL> resources;
+		try {
+			resources = cl.getResources(path);
+		} catch (IOException exc) {
+			logger.log(Level.WARNING, MessageFormat.format("An error occurred while reading properties from path ''{0}''", path), exc);
+			return;
+		}
+		while (resources.hasMoreElements()) {
+			URL url = resources.nextElement();
+			try {
+				InputStream is = url.openStream();
+				if (is != null) {
+					Properties props = new Properties();
+					props.load(is);
+					for (Object key : props.keySet()) {
+						String keyString = (String) key;
+						try {
+							if (implementations.containsKey(key)) {
+								String existingClassString = implementations.get(keyString);
+								String newClassString = props.getProperty(keyString);
+								if (!existingClassString.equals(newClassString)) {
+									Class<?> existingClass = cl.loadClass(existingClassString);
+									Class<?> newClass = cl.loadClass(newClassString);
+									String superClass = null;
+									String subClass = null;
+									// select the class lowest in the class hierarchy 
+									if (existingClass.isAssignableFrom(newClass)) {
+										superClass = existingClassString;
+										subClass = newClassString;
+									} else if (newClass.isAssignableFrom(existingClass)) {
+										superClass = newClassString;
+										subClass = existingClassString;
+									}
+									if (superClass != null) {
+										logger.log(Level.INFO, "Implementation for interface ''{0}'' was found more than once, using subclass ''{1}'' (found superclass ''{2}'')",
+												new Object[] { key, subClass, superClass });
+										implementations.put(keyString, subClass);
+									} else {
+										logger.log(Level.WARNING, "Implementation for interface ''{0}'' was found more than once, using ''{1}''. (Conflict between ''{1}'' and ''{2}'')",
+												new Object[] { key, existingClassString, newClassString });
+									}
+								}
+							} else {
+								cl.loadClass(props.getProperty(keyString));
+								implementations.put(keyString, props.getProperty(keyString));
+							}
+						} catch (ClassNotFoundException exc) {
+							logger.log(Level.SEVERE, "Class found in odf-implementation.properties file could not be loaded", exc);
+						}
+					}
+					is.close();
+				}
+			} catch (IOException e) {
+				logger.log(Level.WARNING, MessageFormat.format("Properties ''{0}'' could not be loaded", url), e);
+			}
+		}
+	}
+
+	public Map<String, String> getImplementations() {
+		return implementations;
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java
new file mode 100755
index 0000000..64e54ad
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInitializer.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+
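+/**
+ * Controls the lifecycle of the ODF runtime. start() brings up the discovery service
+ * queue manager; stop() shuts down the queues, all unmanaged threads, and the tracker
+ * store cache. Both methods are idempotent and guarded by a single lock.
+ */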
+public class ODFInitializer {
+
+	static Logger logger = Logger.getLogger(ODFInitializer.class.getName());
+
+	static Object initLock = new Object();
+
+	private static boolean running = false;
+	private static long lastStopTimestamp = 0;
+	private static long lastStartTimestamp = 0;
+	private static boolean startStopInProgress = false;
+	
+
+	public static long getLastStopTimestamp() {
+		synchronized (initLock) {
+			return lastStopTimestamp;
+		}
+	}
+
+	public static long getLastStartTimestamp() {
+		synchronized (initLock) {
+			return lastStartTimestamp;
+		}
+	}
+
+	public static boolean isRunning() {
+		synchronized (initLock) {
+			return running;
+		}
+	}
+	
+	public static boolean isStartStopInProgress() {
+		synchronized (initLock) {
+			return startStopInProgress;
+		}
+	}
+
+	public static void start() {
+		synchronized (initLock) {
+			if (!running) {
+				startStopInProgress = true;
+				DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
+				try {
+					qm.start();
+				} catch (Exception e) {
+					logger.log(Level.WARNING, "Exception occurred while starting ODF", e);
+				}
+				lastStartTimestamp = System.currentTimeMillis();
+				running = true;
+				startStopInProgress = false;
+			}
+		}
+	}
+
+	public static void stop() {
+		synchronized (initLock) {
+			if (running) {
+				startStopInProgress = true;
+				ODFInternalFactory f = new ODFInternalFactory();
+				DiscoveryServiceQueueManager qm = f.create(DiscoveryServiceQueueManager.class);
+				try {
+					qm.stop();
+				} catch (TimeoutException e) {
+					logger.log(Level.WARNING, "Timeout occurred while stopping ODF", e);
+				}
+				ThreadManager tm = f.create(ThreadManager.class);
+				tm.shutdownAllUnmanagedThreads();
+				AnalysisRequestTrackerStore arts = f.create(AnalysisRequestTrackerStore.class);
+				arts.clearCache();
+				lastStopTimestamp = System.currentTimeMillis();
+				running = false;
+				startStopInProgress = false;
+			}
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java
new file mode 100755
index 0000000..9023b5d
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFInternalFactory.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.utils.ODFLogConfig;
+
+public class ODFInternalFactory {
+
+	private static Properties defaultImplementations = Utils.readConfigProperties("org/apache/atlas/odf/core/internal/odf-default-implementation.properties");
+	private static ODFImplementations overwrittenImplementations = null;
+	private static Map<Class<?>, Object> singletons = new HashMap<>();
+
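+	// An implementation mapping may append this marker to request singleton semantics,
+	// e.g. (hypothetical entry): some.Interface=some.pkg.SomeImpl@singleton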
+	public static final String SINGLETON_MARKER = "@singleton";
+
+	static {
+		ODFLogConfig.run();
+
+		Logger logger = Logger.getLogger(ODFInternalFactory.class.getName());
+		ClassLoader cl = ODFInternalFactory.class.getClassLoader();
+		String overwriteConfig = "org/apache/atlas/odf/odf-implementation.properties";
+		overwrittenImplementations = new ODFImplementations(overwriteConfig, cl);
+		if (overwrittenImplementations.getImplementations().isEmpty()) {
+			overwrittenImplementations = null;
+		} else {
+			logger.log(Level.INFO, "Found overwritten implementation config: {0}", overwrittenImplementations.getImplementations());
+		}
+		if (overwrittenImplementations == null) {
+			logger.log(Level.INFO, "Default implementations are used");
+		}
+	}
+
+	private Object createObject(Class<?> cl) throws ClassNotFoundException, IllegalAccessException, InstantiationException {
+		String clazz = null;
+		if (overwrittenImplementations != null) {
+			clazz = overwrittenImplementations.getImplementations().get(cl.getName());
+		}
+		if (clazz == null) {
+			clazz = defaultImplementations.getProperty(cl.getName());
+		}
+		if (clazz == null) {
+			// finally try to instantiate the class as such
+			clazz = cl.getName();
+		}
+		boolean isSingleton = false;
+		if (clazz.endsWith(SINGLETON_MARKER)) {
+			clazz = clazz.substring(0, clazz.length() - SINGLETON_MARKER.length());
+			isSingleton = true;
+		}
+		Object o = null;
+		Class<?> implClass = this.getClass().getClassLoader().loadClass(clazz);
+		if (isSingleton) {
+			o = singletons.get(implClass);
+			if (o == null) {
+				o = implClass.newInstance();
+				singletons.put(implClass, o);
+			}
+		} else {
+			o = implClass.newInstance();
+		}
+		return o;
+	}
+
+	@SuppressWarnings("unchecked")
+	public <T> T create(Class<T> cl) {
+		try {
+			return (T) createObject(cl);
+		} catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
+			throw new RuntimeException(e);
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java
new file mode 100755
index 0000000..623a727
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/ODFUtils.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.text.MessageFormat;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+
+public class ODFUtils {
+	public static int DEFAULT_TIMEOUT_SECS = 10 * 60; // 10 minutes
+
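+	/*
+	 * Usage sketch (assumes an initialized ODF instance and a populated request):
+	 *
+	 *   AnalysisManager am = new ODFFactory().create().getAnalysisManager();
+	 *   AnalysisRequest request = ...; // data sets and discovery service sequence set by the caller
+	 *   AnalysisRequestStatus status = ODFUtils.runSynchronously(am, request);
+	 */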
+	public static AnalysisRequestStatus runSynchronously(AnalysisManager analysisManager, AnalysisRequest request) {
+		return runSynchronously(analysisManager, request, DEFAULT_TIMEOUT_SECS); // use the default timeout
+	}
+
+	public static AnalysisRequestStatus runSynchronously(AnalysisManager analysisManager, AnalysisRequest request, int timeoutInSeconds) {
+		Logger logger = Logger.getLogger(ODFUtils.class.getName());
+		AnalysisResponse response = analysisManager.runAnalysis(request);
+		if (response.isInvalidRequest()) {
+			AnalysisRequestStatus status = new AnalysisRequestStatus();
+			status.setState(AnalysisRequestStatus.State.ERROR);
+			status.setDetails(MessageFormat.format("Request was invalid. Details: {0}", response.getDetails()));
+			status.setRequest(request);
+			return status;
+		}
+		AnalysisRequestStatus status = null;
+		long startTime = System.currentTimeMillis();
+		boolean timeOutReached = false;
+		do {
+			logger.fine("Polling for result...");
+			status = analysisManager.getAnalysisRequestStatus(response.getId());
+			try {
+				Thread.sleep(1000);
+			} catch (InterruptedException e) {
+				// Ignore interrupt and keep polling until the timeout is reached
+				logger.fine("Polling sleep was interrupted");
+			}
+			long currentTime = System.currentTimeMillis();
+			timeOutReached = (currentTime - startTime) > (timeoutInSeconds * 1000);
+		} while ((AnalysisRequestStatus.State.ACTIVE.equals(status.getState()) || AnalysisRequestStatus.State.QUEUED.equals(status.getState()))
+				&& !timeOutReached);
+		return status;
+
+	}
+
+	public static AnalysisRequestStatus.State combineStates(List<AnalysisRequestStatus.State> allStates) {
+		// if one of the requests is in error, so is the complete request
+		if (allStates.contains(AnalysisRequestStatus.State.ERROR)) {
+			return AnalysisRequestStatus.State.ERROR;
+		}
+		// if no request could be found -> not found
+		if (Utils.containsOnly(allStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.NOT_FOUND })) {
+			return AnalysisRequestStatus.State.NOT_FOUND;
+		}
+		// if all requests are either not found or finished -> finished
+		if (Utils.containsOnly(allStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.NOT_FOUND, AnalysisRequestStatus.State.FINISHED })) {
+			return AnalysisRequestStatus.State.FINISHED;
+		}
+		// else always return active
+		return AnalysisRequestStatus.State.ACTIVE;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java
new file mode 100755
index 0000000..e8361fd
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/OpenDiscoveryFrameworkImpl.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.engine.EngineManager;
+import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+import org.apache.atlas.odf.json.JSONUtils;
+
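+/**
+ * Facade implementation giving access to all ODF managers. Instances are typically
+ * obtained via new ODFFactory().create() rather than by direct construction.
+ */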
+public class OpenDiscoveryFrameworkImpl implements OpenDiscoveryFramework {
+
+	private Logger logger = Logger.getLogger(OpenDiscoveryFrameworkImpl.class.getName());
+
+	public OpenDiscoveryFrameworkImpl() {
+		if (!ODFInitializer.isRunning() && !ODFInitializer.isStartStopInProgress()) {
+			logger.log(Level.INFO, "Initializing Open Discovery Framework");
+			ODFInitializer.start();
+			getEngineManager().checkHealthStatus(); // This implicitly initializes the control center and the message queues
+			
+			logger.log(Level.INFO, "Open Discovery Framework successfully initialized.");
+			
+			// log active runtimes
+			ServiceRuntimesInfo activeRuntimesInfo = ServiceRuntimes.getRuntimesInfo(ServiceRuntimes.getActiveRuntimes());
+			try {
+				logger.log(Level.INFO, "Active runtimes: ''{0}''", JSONUtils.toJSON(activeRuntimesInfo));
+			} catch (JSONException e) {
+				logger.log(Level.WARNING, "Active runtime info has wrong format", e);
+			}
+		}
+	}
+
+	public AnalysisManager getAnalysisManager() {
+		return new ODFInternalFactory().create(AnalysisManager.class);
+	}
+
+	public DiscoveryServiceManager getDiscoveryServiceManager() {
+		return new ODFInternalFactory().create(DiscoveryServiceManager.class);
+	}
+
+	public EngineManager getEngineManager() {
+		return new ODFInternalFactory().create(EngineManager.class);
+	}
+
+	public SettingsManager getSettingsManager() {
+		return new ODFInternalFactory().create(SettingsManager.class);
+	}
+
+	public AnnotationStore getAnnotationStore() {
+		return new ODFInternalFactory().create(AnnotationStore.class);
+	}
+
+	public MetadataStore getMetadataStore() {
+		return new ODFInternalFactory().create(MetadataStore.class);
+	}
+
+	public JDBCMetadataImporter getJDBCMetadataImporter() {
+		return new ODFInternalFactory().create(JDBCMetadataImporter.class);
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java
new file mode 100755
index 0000000..e58dd37
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/StandaloneEnvironment.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+
+public class StandaloneEnvironment implements Environment {
+
+	@Override
+	public String getProperty(String propertyName) {
+		return System.getProperty(propertyName);
+	}
+
+	@Override
+	public String getCurrentUser() {
+		return System.getProperty("user.name");
+	}
+
+	@Override
+	public String getZookeeperConnectString() {
+		return getProperty("odf.zookeeper.connect");
+	}
+
+	@Override
+	public ConfigContainer getDefaultConfiguration() {
+		return Utils.readConfigurationFromClasspath("org/apache/atlas/odf/core/internal/odf-initial-configuration.json");
+	}
+
+	@Override
+	public Map<String, String> getPropertiesWithPrefix(String prefix) {
+		Map<String, String> foundProps = new HashMap<>();
+		Properties props = System.getProperties();
+		for (String key : props.stringPropertyNames()) {
+			if (key.startsWith(prefix)) {
+				foundProps.put(key, props.getProperty(key));
+			}
+		}
+		return foundProps;
+	}
+
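+	// Interpretation of the odf.active.runtimes system property (runtime names are illustrative):
+	//   unset or "ALL" -> null, meaning all available runtimes are active
+	//   "NONE"         -> empty list, no runtimes active
+	//   "Spark,Java"   -> exactly the listed runtimes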
+	@Override
+	public List<String> getActiveRuntimeNames() {
+		String p = getProperty("odf.active.runtimes");
+		if (p == null || p.equals("ALL")) {
+			return null;
+		}
+		if (p.equals("NONE")) {
+			return new ArrayList<>();
+		}
+		return Arrays.asList(p.split(","));
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java
new file mode 100755
index 0000000..060f9fb
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/Utils.java
@@ -0,0 +1,314 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.StringTokenizer;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.settings.KafkaConsumerConfig;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+
+public class Utils {
+
+	static Logger logger = Logger.getLogger(Utils.class.getName());
+
+	private static final List<Class<? extends Object>> MERGABLE_CLASSES = Arrays.asList(ConfigContainer.class, KafkaConsumerConfig.class, ODFSettings.class, DiscoveryServiceProperties.class);
+
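+	/**
+	 * Merges all non-null properties of {@code update} into {@code source} via reflection.
+	 * Sketch of the effect: merging a settings object that only carries a new password into
+	 * a fully populated ODFSettings instance overwrites just the password and leaves all
+	 * other settings untouched; nested mergeable POJOs are merged recursively and map
+	 * entries are overlaid key by key.
+	 */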
+	public static void mergeODFPOJOs(Object source, Object update) {
+		if (!source.getClass().isAssignableFrom(update.getClass())) {
+			return;
+		}
+
+		Method[] sourceMethods = source.getClass().getDeclaredMethods();
+
+		for (Method getterMethod : sourceMethods) {
+			if (getterMethod.getName().startsWith("get") || getterMethod.getName().startsWith("is")) {
+				// Derive the setter name from the getter prefix; substring avoids accidentally
+				// replacing a "get" or "is" occurring elsewhere in the method name
+				String getterName = getterMethod.getName();
+				String setterMethodName = getterName.startsWith("is")
+						? "set" + getterName.substring(2)
+						: "set" + getterName.substring(3);
+				try {
+					Method setterMethod = source.getClass().getDeclaredMethod(setterMethodName, getterMethod.getReturnType());
+					Object updateValue = getterMethod.invoke(update);
+					if (updateValue != null) {
+						Object sourceValue = getterMethod.invoke(source);
+
+						if (sourceValue != null && MERGABLE_CLASSES.contains(updateValue.getClass())) {
+							//Value is another POJO, must also try merging these instead of overwriting
+							mergeODFPOJOs(sourceValue, updateValue);
+							setterMethod.invoke(source, sourceValue);
+						} else if (sourceValue instanceof Map && updateValue instanceof Map) {
+							Map updateJSON = (Map) updateValue;
+							Map sourceJSON = (Map) sourceValue;
+							for (Object key : updateJSON.keySet()) {
+								sourceJSON.put(key, updateJSON.get(key));
+							}
+							setterMethod.invoke(source, sourceJSON);
+						} else {
+							setterMethod.invoke(source, updateValue);
+						}
+					}
+
+				} catch (NoSuchMethodException e) {
+					throw new RuntimeException(MessageFormat.format("Objects of type {0}  and {1} could not be merged, no matching method found for {2}!", source.getClass().getName(), update
+							.getClass().getName(), getterMethod.getName()), e);
+				} catch (SecurityException e) {
+					throw new RuntimeException(MessageFormat.format("Objects of type {0}  and {1} could not be merged, method {2} could not be accessed (SecurityException)!", source.getClass()
+							.getName(), update.getClass().getName(), setterMethodName), e);
+				} catch (IllegalAccessException e) {
+					throw new RuntimeException(MessageFormat.format("Objects of type {0}  and {1} could not be merged, method {2} could not be accessed! (IllegalAccessException)", source.getClass()
+							.getName(), update.getClass().getName(), getterMethod.getName()), e);
+				} catch (IllegalArgumentException e) {
+					throw new RuntimeException(MessageFormat.format("Objects of type {0}  and {1} could not be merged, method {2} does not accept the right parameters!", source.getClass().getName(),
+							update.getClass().getName(), setterMethodName), e);
+				} catch (InvocationTargetException e) {
+					e.printStackTrace();
+					throw new RuntimeException(MessageFormat.format("Objects of type {0}  and {1} could not be merged, method {2} or {3} could not be invoked!", source.getClass().getName(), update
+							.getClass().getName(), getterMethod.getName(), setterMethodName), e);
+				}
+
+			}
+		}
+	}
+
+	public static Properties readConfigProperties(String path) {
+		// TODO cache this in static variables, it doesn't change at runtime 
+		InputStream is = Utils.class.getClassLoader().getResourceAsStream(path);
+		if (is == null) {
+			return null;
+		}
+		Properties props = new Properties();
+		try {
+			props.load(is);
+		} catch (IOException e) {
+			throw new RuntimeException(e);
+		}
+		return props;
+	}
+
+	public static void setCurrentTimeAsLastModified(AnalysisRequestTracker tracker) {
+		tracker.setLastModified(System.currentTimeMillis());
+	}
+
+	public static String getExceptionAsString(Throwable exc) {
+		StringWriter sw = new StringWriter();
+		PrintWriter pw = new PrintWriter(sw);
+		exc.printStackTrace(pw);
+		String st = sw.toString();
+		return st;
+	}
+
+	public static String collectionToString(Collection<?> coll, String separator) {
+		// initialize the buffer up front so that an empty collection yields "[  ]" instead of a NullPointerException
+		StringBuffer buf = new StringBuffer("[ ");
+		boolean first = true;
+		for (Object o : coll) {
+			if (!first) {
+				buf.append(separator);
+			}
+			first = false;
+			buf.append(o.toString());
+		}
+		buf.append(" ]");
+		return buf.toString();
+	}
+
+	public static <T> boolean containsOnly(List<T> l, T[] elements) {
+		for (T t : l) {
+			boolean containsOnlyElements = false;
+			for (T el : elements) {
+				if (t.equals(el)) {
+					containsOnlyElements = true;
+					break;
+				}
+			}
+			if (!containsOnlyElements) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+	public static <T> boolean containsNone(List<T> l, T[] elements) {
+		// true if the list contains none of the given elements
+		for (T t : l) {
+			for (T el : elements) {
+				if (t.equals(el)) {
+					return false;
+				}
+			}
+		}
+		return true;
+	}
+
+	public static List<String> splitString(String s, char separator) {
+		List<String> l = new ArrayList<String>();
+		if (s != null) {
+			StringTokenizer tok = new StringTokenizer(s, String.valueOf(separator));
+			while (tok.hasMoreTokens()) {
+				l.add(tok.nextToken());
+			}
+		}
+		return l;
+	}
+
+	public static String getInputStreamAsString(InputStream is, String encoding) {
+		try {
+			// accumulate in a growing buffer instead of re-copying the byte array on every read
+			ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+			byte[] temp = new byte[2048];
+			int bytesRead;
+			while ((bytesRead = is.read(temp)) != -1) {
+				buffer.write(temp, 0, bytesRead);
+			}
+			return buffer.toString(encoding);
+		} catch (IOException exc) {
+			return getExceptionAsString(exc);
+		}
+	}
+
+	public static void mergeJSONObjects(JSONObject source, JSONObject target) {
+		if (source != null && target != null) {
+			target.putAll(source);
+		}
+	}
+
+	public static <T> T getValue(T value, T defaultValue) {
+		if (value == null) {
+			return defaultValue;
+		}
+		return value;
+	}
+
+	public static String getSystemPropertyExceptionIfMissing(String propertyName) {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		String value = env.getProperty(propertyName);
+		if (value == null) {
+			String msg = MessageFormat.format("System property ''{0}'' is not set", propertyName);
+			logger.log(Level.SEVERE, msg);
+			throw new RuntimeException(msg);
+		}
+		return value;
+	}
+	
+	public static int getIntEnvironmentProperty(String propertyName, int defaultValue) {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		String value = env.getProperty(propertyName);
+		if (value == null) {
+			return defaultValue;
+		}
+		try {
+			return Integer.parseInt(value);
+		} catch(NumberFormatException exc) {
+			return defaultValue;
+		}
+	}
+
+
+	public static void runSystemCommand(String command) {
+		logger.log(Level.INFO, "Running system command: " + command);
+		try {
+			Runtime r = Runtime.getRuntime();
+			Process p = r.exec(command);
+			// consume the output before waiting, otherwise a full output buffer can block the process
+			BufferedReader b = new BufferedReader(new InputStreamReader(p.getInputStream()));
+			String line;
+			while ((line = b.readLine()) != null) {
+				logger.log(Level.INFO, "System command out: " + line);
+			}
+			b.close();
+			p.waitFor();
+		} catch(IOException | InterruptedException e) {
+			logger.log(Level.WARNING, "Error executing system command.", e);
+		}
+	}
+	
+	public static ConfigContainer readConfigurationFromClasspath(String jsonFileInClasspath) {
+		InputStream is = SettingsManager.class.getClassLoader().getResourceAsStream(jsonFileInClasspath);
+		try {
+			JSONObject configJSON = new JSONObject(is);
+			ConfigContainer config = JSONUtils.fromJSON(configJSON.write(), ConfigContainer.class);
+			return config;
+		} catch (Exception exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+
+	public static String joinStrings(List<String> l, char separator) {
+		String result = null;
+		if ((l != null) && !l.isEmpty()) {
+			StringBuilder buf = null;
+			for (String s : l) {
+				if (buf == null) {
+					buf = new StringBuilder();
+				} else {
+					buf.append(separator);
+				}
+				buf.append(s);
+			}
+			result = buf.toString();
+		}
+		return result;
+	}
+	
+	public static String getEnvironmentProperty(String name, String defaultValue) {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		String s = env.getProperty(name);
+		return s != null ? s : defaultValue;		
+	}
+	
+	public static long getEnvironmentProperty(String name, long defaultValue) {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		String s = env.getProperty(name);
+		if (s == null) {
+			return defaultValue;
+		}
+		try {
+			return Long.parseLong(s);
+		} catch(NumberFormatException exc) {
+			String msg = MessageFormat.format("Property ''{0}'' could not be converted to a number", new Object[]{name});
+			logger.log(Level.WARNING, msg);
+			return defaultValue;
+		}
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java
new file mode 100755
index 0000000..8f7fab2
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/analysis/AnalysisManagerImpl.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.analysis;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.ODFUtils;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ *
+ * External Java API for creating and managing analysis requests
+ *
+ */
+public class AnalysisManagerImpl implements AnalysisManager {
+
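+	// A request over several data sets is split into one request per data set; the returned
+	// compound id joins the individual ids, e.g. (illustrative): "id-1,id-2,id-3".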
+	public final static char COMPOUND_REQUEST_SEPARATOR = ',';
+	private Logger logger = Logger.getLogger(AnalysisManagerImpl.class.getName());
+	private ControlCenter controlCenter;
+
+	public AnalysisManagerImpl() {
+		controlCenter = new ODFInternalFactory().create(ControlCenter.class);
+	}
+
+	/**
+	 * Issues a new ODF analysis request
+	 *
+	 * @param request Analysis request
+	 * @return Response containing the request id and status information
+	 */
+	public AnalysisResponse runAnalysis(AnalysisRequest request) {
+		if (((request.getDiscoveryServiceSequence() == null) || request.getDiscoveryServiceSequence().isEmpty())
+			&& ((request.getAnnotationTypes() == null) || request.getAnnotationTypes().isEmpty())) {
+			AnalysisResponse response = new AnalysisResponse();
+			response.setId(request.getId());
+			response.setDetails("Either a sequence of discovery service ids or a list of annotation types must be specified to initiate an analysis request.");
+			response.setInvalidRequest(true);
+			return response;
+		}
+
+		if ((request.getDataSets().size() == 1) || request.isProcessDataSetsSequentially()) {
+			logger.log(Level.INFO, "Using sequential request processing (single data set or sequential processing was requested)");
+			AnalysisResponse response = controlCenter.startRequest(request);
+			logger.log(Level.INFO, "Request with ID ''{0}'' started on data sets ''{1}''. Complete request: {2}.",
+					new Object[] { response.getId(), request.getDataSets(), JSONUtils.lazyJSONSerializer(request) });
+			return response;
+		}
+
+		List<String> requestIDs = new ArrayList<String>();
+		List<String> detailsMessages = new ArrayList<String>();
+		boolean invalidRequest = true;
+		logger.log(Level.INFO, "Running requests for ''{0}'' data sets in parallel", request.getDataSets().size());
+		logger.log(Level.FINE, "Splitting request into one request per data set. Data Sets: {0}", request.getDataSets());
+		for (MetaDataObjectReference dataSet : request.getDataSets()) {
+			AnalysisRequest partRequest = new AnalysisRequest();
+			partRequest.setDiscoveryServiceSequence(request.getDiscoveryServiceSequence());
+			partRequest.setAdditionalProperties(request.getAdditionalProperties());
+			partRequest.setDataSets(Collections.singletonList(dataSet));
+			AnalysisResponse partResponse = controlCenter.startRequest(partRequest);
+			if (!partResponse.isInvalidRequest()) {
+				String partRequestID = partResponse.getId();
+				requestIDs.add(partRequestID);
+				detailsMessages.add(partResponse.getDetails());
+				// as soon as one request is valid, we make the compound request valid
+				invalidRequest = false;
+			}
+		}
+		AnalysisResponse response = new AnalysisResponse();
+		response.setId(Utils.joinStrings(requestIDs, COMPOUND_REQUEST_SEPARATOR));
+		response.setDetails(Utils.joinStrings(detailsMessages, COMPOUND_REQUEST_SEPARATOR));
+		response.setInvalidRequest(invalidRequest);
+		return response;
+	}
+
+	/**
+	 * Retrieve status of an ODF analysis request
+	 *
+	 * @param requestId Unique id of the analysis request
+	 * @return Status of the analysis request
+	 */
+	public AnalysisRequestStatus getAnalysisRequestStatus(String requestId) {
+		List<String> singleRequestIds = Utils.splitString(requestId, COMPOUND_REQUEST_SEPARATOR);
+		if (singleRequestIds.size() == 1) {
+			AnalysisRequestStatus status = controlCenter.getRequestStatus(requestId);
+			return status;
+		}
+		AnalysisRequestStatus compoundStatus = new AnalysisRequestStatus();
+		compoundStatus.setState(State.QUEUED);
+		AnalysisRequest compoundRequest = new AnalysisRequest(); // assemble a compound request 
+		compoundRequest.setId(requestId);
+		List<String> allMessages = new ArrayList<String>();
+		List<MetaDataObjectReference> allDataSets = new ArrayList<>();
+		List<State> allStates = new ArrayList<>();
+		for (String singleRequestId : singleRequestIds) {	
+			AnalysisRequestStatus singleStatus = controlCenter.getRequestStatus(singleRequestId);
+			if (compoundRequest.getDiscoveryServiceSequence() == null) {
+				// assume all fields of the single requests are the same
+				// since they were created through runAnalysis()
+				compoundRequest.setDiscoveryServiceSequence(singleStatus.getRequest().getDiscoveryServiceSequence());
+				compoundRequest.setAdditionalProperties(singleStatus.getRequest().getAdditionalProperties());
+			}
+			if (singleStatus.getRequest().getDataSets() != null) {
+				allDataSets.addAll(singleStatus.getRequest().getDataSets());
+			}
+			allStates.add(singleStatus.getState());
+			allMessages.add(singleStatus.getDetails());
+		}
+		compoundRequest.setDataSets(allDataSets);
+
+		compoundStatus.setState(ODFUtils.combineStates(allStates));
+		compoundStatus.setRequest(compoundRequest);
+		compoundStatus.setDetails(Utils.joinStrings(allMessages, COMPOUND_REQUEST_SEPARATOR));
+		return compoundStatus;
+	}
+
+	/**
+	 * Retrieve statistics about all previous ODF analysis requests
+	 *
+	 * @return Request summary
+	 */
+	public AnalysisRequestSummary getAnalysisStats() {
+		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
+		return store.getRequestSummary();
+	}
+
+	/**
+	 * Retrieve status details of recent ODF analysis requests
+	 *
+	 * @param offset Starting offset (use 0 to start with the latest request)
+	 * @param limit Maximum number of analysis requests to be returned (use -1 to retrieve all requests)
+	 * @return Status details for each discovery request
+	 */
+	public AnalysisRequestTrackers getAnalysisRequests(int offset, int limit) {
+		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
+		AnalysisRequestTrackers analysisrequestTrackers = new AnalysisRequestTrackers();
+		analysisrequestTrackers.setAnalysisRequestTrackers(store.getRecentTrackers(offset, limit));
+		return analysisrequestTrackers;
+	}
+
+	/**
+	 * Request a specific ODF discovery request to be canceled
+	 *
+	 * @param requestId Unique id of the analysis request
+	 * @return Status of the cancellation attempt
+	 */
+	public AnalysisCancelResult cancelAnalysisRequest(String requestId) {
+		return controlCenter.cancelRequest(requestId);
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java
new file mode 100755
index 0000000..798b2d3
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/annotation/InternalAnnotationStoreUtils.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.annotation;
+
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+
+public class InternalAnnotationStoreUtils {
+
+	public static void storeDiscoveryServiceResult(DiscoveryServiceResult result, AnalysisRequest req) {
+		Logger logger = Logger.getLogger(InternalAnnotationStoreUtils.class.getName());
+		AnnotationStore mds = new ODFFactory().create().getAnnotationStore();
+		mds.setAnalysisRun(req.getId());
+		if (result != null) {
+			logger.log(Level.FINE, "Persisting annotations returned by discovery service");
+			List<Annotation> annotations = result.getAnnotations();
+			if (annotations != null) {
+				for (Annotation annot : annotations) {
+					// only persist if reference was not set
+					if (annot.getReference() == null) {
+						mds.store(annot);
+					} else {
+						logger.log(Level.WARNING, "Returned annotation object has a non-null reference set and will not be persisted (reference: {0})", annot.getReference().toString());
+					}
+				}
+			}
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java
new file mode 100755
index 0000000..f779155
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigContainer.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.configuration;
+
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+/**
+ * JSON structure:
+ * {
+ *  	"odf" : {...},
+ *  	"registeredServices" : [...]
+ * }
+ *
+ * This class is final because reflection is used to access getters/setters when merging
+ * configurations, which does not work with inherited methods.
+ */
+@ApiModel(description="All ODF configuration options.")
+public final class ConfigContainer {
+
+	@ApiModelProperty(value="General ODF configuration options", required=true)
+	private ODFSettings odf;
+
+	@ApiModelProperty(value="Details about available discovery services")
+	private List<DiscoveryServiceProperties> registeredServices = null;
+
+	public List<DiscoveryServiceProperties> getRegisteredServices() {
+		return registeredServices;
+	}
+
+	public void setRegisteredServices(List<DiscoveryServiceProperties> registeredServices) {
+		this.registeredServices = registeredServices;
+	}
+
+	public ODFSettings getOdf() {
+		return odf;
+	}
+
+	public void setOdf(ODFSettings odfSettings) {
+		this.odf = odfSettings;
+	}
+
+	public void validate() throws ValidationException {
+		if (this.odf != null) {
+			odf.validate();
+		}
+		if (this.registeredServices != null) {
+			new ServiceValidator().validate("ODFConfig.registeredServices", this.registeredServices);
+		}
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java
new file mode 100755
index 0000000..7ad90e6
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ConfigManager.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.configuration;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.settings.SparkConfig;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
+
+public class ConfigManager {
+	private Logger logger = Logger.getLogger(ConfigManager.class.getName());
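+	// Passwords handed out to clients are replaced by this identifier; an update that sends
+	// the identifier back unchanged keeps the stored encrypted value (see unhideAndEncryptPasswords).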
+	public static final String HIDDEN_PASSWORD_IDENTIFIER = "***hidden***";
+	public static final long CONFIG_UPDATE_SLEEP_BETWEEN_POLLS = 20;
+	public static final int CONFIG_UPDATE_MAX_POLLS = 1500;
+	private static final String DEFAULT_ENCRYPTED_SPARK_CONFIGS = "spark.authenticate.secret,spark.ssl.keyPassword,spark.ssl.keyStorePassword,spark.ssl.trustStorePassword";
+
+	protected ODFConfigurationStorage configurationStore;
+	protected ODFConfigNotificationPublisher notificationManager;
+
+	public ConfigManager() {
+		ODFInternalFactory f = new ODFInternalFactory();
+		this.configurationStore = f.create(ODFConfigurationStorage.class);
+		this.notificationManager = f.create(ODFConfigNotificationPublisher.class);
+	}
+
+	public ConfigContainer getConfigContainer() {
+		ConfigContainer config = configurationStore.getConfig(getDefaultConfigContainer());
+		return config;
+	}
+
+	public ConfigContainer getConfigContainerHidePasswords() {
+		ConfigContainer config = configurationStore.getConfig(getDefaultConfigContainer());
+		hidePasswords(config);
+		return config;
+	}
+
+	public void updateConfigContainer(ConfigContainer update) throws ValidationException {
+		try {
+			update = JSONUtils.cloneJSONObject(update);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+		update.validate();
+		ConfigContainer source = getConfigContainer();
+		unhideAndEncryptPasswords(update, source);
+
+		List<DiscoveryServiceProperties> newServicesToRun = new ArrayList<DiscoveryServiceProperties>();
+		if (update.getRegisteredServices() != null
+				&& source.getRegisteredServices().size() < update.getRegisteredServices().size()) {
+			// store added services if update registers new ones
+			List<DiscoveryServiceProperties> newRegisteredServices = new ArrayList<DiscoveryServiceProperties>();
+			newRegisteredServices.addAll(update.getRegisteredServices());
+			for (DiscoveryServiceProperties oldService : source.getRegisteredServices()) {
+				for (int no = 0; no < newRegisteredServices.size(); no++) {
+					if (newRegisteredServices.get(no).getId().equals(oldService.getId())) {
+						newRegisteredServices.remove(no);
+						break;
+					}
+				}
+			}
+
+			newServicesToRun.addAll(newRegisteredServices);
+		}
+
+		Utils.mergeODFPOJOs(source, update);
+		configurationStore.storeConfig(source);
+
+		if (source.getOdf().getRunNewServicesOnRegistration() && !newServicesToRun.isEmpty()) {
+			runNewServices(newServicesToRun);
+		}
+
+		String changeId = UUID.randomUUID().toString();
+		configurationStore.addPendingConfigChange(changeId);
+		this.notificationManager.publishConfigChange(source, changeId);
+		for (int i=0; i < CONFIG_UPDATE_MAX_POLLS; i++) {
+			if (!configurationStore.isConfigChangePending(changeId)) {
+				logger.log(Level.INFO, MessageFormat.format("Config change id ''{0}'' successfully completed after {1} msec.", new Object[] { changeId, i * CONFIG_UPDATE_SLEEP_BETWEEN_POLLS } ));
+				return;
+			}
+			try {
+				Thread.sleep(CONFIG_UPDATE_SLEEP_BETWEEN_POLLS);
+			} catch (InterruptedException e) {
+				// Ignore interrupt
+				logger.log(Level.WARNING, "Sleep period was interrupted", e);
+			}
+		}
+		logger.log(Level.WARNING, MessageFormat.format("Config change did not complete after {0} msec.", CONFIG_UPDATE_SLEEP_BETWEEN_POLLS * CONFIG_UPDATE_MAX_POLLS));
+	}
+
+	public void resetConfigContainer() {
+		logger.warning("Resetting ODF configuration!");
+		configurationStore.storeConfig(getDefaultConfigContainer());
+	}
+
+	private static String defaultConfig = null;
+
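+	// Each META-INF/odf/odf-services.json on the classpath is expected to hold a JSON array
+	// of service definitions, e.g. (sketch, only the "id" field is shown):
+	//   [ { "id": "my-discovery-service", ... } ]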
+	List<DiscoveryServiceProperties> getServicesFoundOnClassPath() throws IOException, JSONException {
+		ClassLoader cl = this.getClass().getClassLoader();
+		Enumeration<URL> services = cl.getResources("META-INF/odf/odf-services.json");
+		List<DiscoveryServiceProperties> result = new ArrayList<>();
+		while (services.hasMoreElements()) {
+			URL url = services.nextElement();
+			InputStream is = url.openStream();
+			String json = Utils.getInputStreamAsString(is, "UTF-8");
+			logger.log(Level.INFO, "Service found on the classpath at {0}: {1}", new Object[] { url, json });
+			result.addAll(JSONUtils.fromJSONList(json, DiscoveryServiceProperties.class));
+		}
+		logger.log(Level.INFO, "Number of classpath services found: {0}", result.size());
+		return result;
+	}
+
+	private ConfigContainer getDefaultConfigContainer() {
+		if (defaultConfig == null) {			
+			try {
+				ConfigContainer config = new ODFInternalFactory().create(Environment.class).getDefaultConfiguration();
+				// now look for services found on the classpath
+				config.getRegisteredServices().addAll(getServicesFoundOnClassPath());
+				defaultConfig = JSONUtils.toJSON(config);
+			} catch (IOException | JSONException e) {
+				String msg = "Default config could not be loaded or parsed!";
+				logger.severe(msg);
+				throw new RuntimeException(msg, e);
+			}
+		}
+		try {
+			return JSONUtils.fromJSON(defaultConfig, ConfigContainer.class);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+	}
+
+	private void runNewServices(List<DiscoveryServiceProperties> newServices) {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		List<String> servicesToRun = new ArrayList<String>();
+		for (DiscoveryServiceProperties info : newServices) {
+			servicesToRun.add(info.getId());
+		}
+
+		AnalysisRequest req = new AnalysisRequest();
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		req.setDiscoveryServiceSequence(servicesToRun);
+		req.setDataSets(mds.search(mds.newQueryBuilder().objectType("DataSet").build()));
+		req.setIgnoreDataSetCheck(true);
+		cc.startRequest(req);
+	}
+
+	private void unhideAndEncryptPasswords(ConfigContainer updatedConfigContainer,
+			ConfigContainer originalConfiguration) {
+		if (updatedConfigContainer.getOdf() != null) {
+			String odfPassword = updatedConfigContainer.getOdf().getOdfPassword();
+			if (odfPassword != null) {
+				if (odfPassword.equals(HIDDEN_PASSWORD_IDENTIFIER)) {
+					// Password was not changed, therefore keep original
+					// encrypted password
+					updatedConfigContainer.getOdf().setOdfPassword(originalConfiguration.getOdf().getOdfPassword());
+				} else if (!Encryption.isEncrypted(odfPassword)) {
+					updatedConfigContainer.getOdf().setOdfPassword(Encryption.encryptText(odfPassword));
+				}
+			}
+			if (updatedConfigContainer.getOdf().getSparkConfig() != null) {
+				SparkConfig updatedSparkConfig = updatedConfigContainer.getOdf().getSparkConfig();
+				if (updatedSparkConfig.getConfigs() != null) {
+					List<String> encryptedSparkConfigs = Arrays.asList(DEFAULT_ENCRYPTED_SPARK_CONFIGS.split(","));
+					for (String configName : updatedSparkConfig.getConfigs().keySet()) {
+						if (encryptedSparkConfigs.contains(configName)) {
+							String updatedConfigValue = (String) updatedSparkConfig.getConfigs().get(configName);
+							if (updatedConfigValue.equals(HIDDEN_PASSWORD_IDENTIFIER)) {
+								// Encrypted value was not changed, therefore keep the
+								// original encrypted value
+								SparkConfig originalSparkConfig = originalConfiguration.getOdf().getSparkConfig();
+								updatedSparkConfig.setConfig(configName, originalSparkConfig.getConfigs().get(configName));
+							} else if (!Encryption.isEncrypted(updatedConfigValue)) {
+								updatedSparkConfig.setConfig(configName, Encryption.encryptText(updatedConfigValue));
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	private void hidePasswords(ConfigContainer configContainer) {
+		if (configContainer.getOdf() != null) {
+			if (configContainer.getOdf().getOdfPassword() != null) {
+				configContainer.getOdf().setOdfPassword(HIDDEN_PASSWORD_IDENTIFIER);
+			}
+			if (configContainer.getOdf().getSparkConfig() != null) {
+				SparkConfig sparkConfig = configContainer.getOdf().getSparkConfig();
+				if (sparkConfig.getConfigs() != null) {
+					List<String> encryptedSparkConfigs = Arrays.asList(DEFAULT_ENCRYPTED_SPARK_CONFIGS.split(","));
+					for (String configName : sparkConfig.getConfigs().keySet()) {
+						if (encryptedSparkConfigs.contains(configName) && sparkConfig.getConfigs().get(configName) != null) {
+							sparkConfig.setConfig(configName, HIDDEN_PASSWORD_IDENTIFIER);
+						}
+					}
+				}
+			}
+		}
+	}
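+
+	/**
+	 * Editorial sketch only (not part of the original ODF code): the two methods
+	 * above implement a mask-on-read / restore-on-write scheme for secrets.
+	 * Reduced to a single value, the write-back rule is:
+	 */
+	private static String restoreOrEncrypt(String updatedValue, String originalValue) {
+		if (HIDDEN_PASSWORD_IDENTIFIER.equals(updatedValue)) {
+			// the client sent the mask back unchanged, so keep the stored value
+			return originalValue;
+		}
+		// a genuinely new value is encrypted unless it already is
+		return Encryption.isEncrypted(updatedValue) ? updatedValue : Encryption.encryptText(updatedValue);
+	}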
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java
new file mode 100755
index 0000000..a7f822f
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ODFConfigNotificationPublisher.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.configuration;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ODFConfigNotificationPublisher {
+
+	Logger logger = Logger.getLogger(ODFConfigNotificationPublisher.class.getName());
+
+	public void publishConfigChange(ConfigContainer update, String changeId) {
+		try {
+			logger.log(Level.FINE, "publishing config change: {0}", JSONUtils.toJSON(update));
+			ConfigContainer clone = JSONUtils.fromJSON(JSONUtils.toJSON(update), ConfigContainer.class);
+			AdminMessage amsg = new AdminMessage();
+			amsg.setId(changeId);
+			amsg.setAdminMessageType(Type.CONFIGCHANGE);
+			amsg.setConfigUpdateDetails(clone);
+			amsg.setDetails("Configuration update");
+			DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
+			qm.enqueueInAdminQueue(amsg);
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An unexpected exception occurres when writing to admin queue. Ignoring it", exc);
+		}
+	}
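+
+	/**
+	 * Editorial sketch only (not part of the original ODF code): the JSON round
+	 * trip above is a generic way to deep-clone any JSON-serializable POJO so that
+	 * the enqueued message cannot be mutated by the caller afterwards.
+	 */
+	static <T> T deepClone(T pojo, Class<T> type) throws Exception {
+		// serialize and re-parse to obtain an independent copy
+		return JSONUtils.fromJSON(JSONUtils.toJSON(pojo), type);
+	}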
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java
new file mode 100755
index 0000000..011d728
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/configuration/ServiceValidator.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.configuration;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.settings.validation.PropertyValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+
+public class ServiceValidator implements PropertyValidator {
+
+	public void validate(String property, Object value) throws ValidationException {
+		validate(property, value, true);
+	}
+
+	private void validate(String property, Object value, boolean topLevel) throws ValidationException {
+		if (value == null) {
+			throw new ValidationException("Null values are not allowed for this property");
+		}
+
+		if (value instanceof List) {
+			List<DiscoveryServiceProperties> newServices = (List<DiscoveryServiceProperties>) value;
+			List<String> ids = new ArrayList<String>();
+			for (int no = 0; no < newServices.size(); no++) {
+				DiscoveryServiceProperties service = (DiscoveryServiceProperties) newServices.get(no);
+				validate(property, service, false);
+				String serviceId = service.getId();
+				if (ids.contains(serviceId)) {
+					throw new ValidationException(property, MessageFormat.format("you cannot register multiple services with the same id {0}!", serviceId));
+				} else {
+					ids.add(serviceId);
+				}
+			}
+		} else if (value instanceof DiscoveryServiceProperties) {
+			DiscoveryServiceProperties service = (DiscoveryServiceProperties) value;
+			if (service.getId() == null || service.getId().trim().isEmpty() || service.getName() == null || service.getName().trim().isEmpty() || service.getEndpoint() == null) {
+				throw new ValidationException(property, "A service requires an id, a name, and an endpoint");
+			}
+
+			if (topLevel) {
+				List<String> regServices = new ArrayList<String>();
+				List<DiscoveryServiceProperties> services = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties();
+				for (DiscoveryServiceProperties regService : services) {
+					regServices.add(regService.getId());
+				}
+
+				if (regServices.contains(service.getId())) {
+					throw new ValidationException(property, MessageFormat.format("a service with id {0} already exists!", service.getId()));
+				}
+			}
+
+			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(service);
+			runtime.validate(service);
+		} else {
+			throw new ValidationException(property, "only DiscoveryServiceRegistrationInfo objects or list of such objects are allowed for this property");
+		}
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java
new file mode 100755
index 0000000..fffff6f
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminMessage.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+
+// JSON
+public class AdminMessage {
+	public static enum Type {
+		SHUTDOWN, RESTART, CONFIGCHANGE
+	}
+
+	private Type adminMessageType;
+	private String details;
+	private ConfigContainer configUpdateDetails;
+	private String messageId;
+
+	public Type getAdminMessageType() {
+		return adminMessageType;
+	}
+
+	public void setAdminMessageType(Type adminMessageType) {
+		this.adminMessageType = adminMessageType;
+	}
+
+	public String getDetails() {
+		return details;
+	}
+
+	public void setDetails(String details) {
+		this.details = details;
+	}
+
+	public ConfigContainer getConfigUpdateDetails() {
+		return configUpdateDetails;
+	}
+
+	public void setConfigUpdateDetails(ConfigContainer configUpdateDetails) {
+		this.configUpdateDetails = configUpdateDetails;
+	}
+
+	public String getId() {
+		return this.messageId;
+	}
+
+	public void setId(String messageId) {
+		this.messageId = messageId;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java
new file mode 100755
index 0000000..874e061
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AdminQueueProcessor.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.core.ODFInitializer;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class AdminQueueProcessor implements QueueMessageProcessor {
+
+	private Logger logger = Logger.getLogger(AdminQueueProcessor.class.getName());
+
+	@Override
+	public void process(ExecutorService executorService, String msg, int partition, long offset) {
+		AdminMessage adminMessage;
+		try {
+			adminMessage = JSONUtils.fromJSON(msg, AdminMessage.class);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+		switch (adminMessage.getAdminMessageType()) {
+		case SHUTDOWN:
+			initiateShutdown(executorService, false);
+			break;
+		case RESTART:
+			initiateShutdown(executorService, true);
+			break;
+		default:
+			// do nothing
+		}
+	}
+
+	static Object restartLockObject = new Object();
+
+	private void initiateShutdown(ExecutorService executorService, final boolean restart) {
+		logger.log(Level.INFO, "Shutdown of ODF was requested...");
+		Runnable shutDownRunnable = new Runnable() {
+
+			@Override
+			public void run() {
+				logger.log(Level.INFO, "Initiating shutdown");
+
+				// sleep briefly before initiating the actual shutdown to give process()
+				// a chance to return before its own infrastructure is shut down
+				long sleepTimeBeforeShutdown = 1000;
+				try {
+					Thread.sleep(sleepTimeBeforeShutdown);
+				} catch (InterruptedException e) {
+					// restore the interrupt flag and continue with the shutdown
+					Thread.currentThread().interrupt();
+				}
+
+				synchronized (restartLockObject) {
+					logger.log(Level.INFO, "Shutting down ODF...");
+					try {
+						ODFInitializer.stop();
+						logger.log(Level.INFO, "ODF was shutdown");
+											
+						if (restart) {
+							logger.log(Level.INFO, "Restarting ODF");
+							ODFInitializer.start();
+							logger.log(Level.INFO, "ODF restarted");
+						}
+					}  catch (Exception e) {
+						logger.log(Level.SEVERE, "An unexpected error occurred when shutting down ODF", e);
+					}
+				}
+
+			}
+
+		};
+
+		executorService.submit(shutDownRunnable);
+	}
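+
+	// Editorial note (not part of the original patch): submitting the shutdown as a
+	// task to the same executor, combined with the initial sleep, lets process()
+	// return to the messaging layer before the consumer infrastructure is torn down.
+	// A shutdown or restart is triggered by enqueueing an admin message, e.g.:
+	//
+	//   AdminMessage msg = new AdminMessage();
+	//   msg.setAdminMessageType(AdminMessage.Type.RESTART);
+	//   new ODFInternalFactory().create(DiscoveryServiceQueueManager.class)
+	//       .enqueueInAdminQueue(msg);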
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java
new file mode 100755
index 0000000..e43bd45
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AnalysisRequestTrackerStore.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.List;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+
+
+public interface AnalysisRequestTrackerStore {
+	
+	/**
+	 * set the status of old requests which were last modified before the cutOffTimestamp
+	 * with an optional detailsMessage
+	 */
+	void setStatusOfOldRequest(long cutOffTimestamp, STATUS status, String detailsMessage);
+	
+	// store / update the passed tracker
+	void store(AnalysisRequestTracker tracker);
+	
+	AnalysisRequestTracker query(String analysisRequestId);
+
+	AnalysisRequestTracker findSimilarQueuedRequest(AnalysisRequest request);
+	
+	/**
+	 * @param offset - offset of the first tracker to retrieve
+	 * @param limit - maximum number of trackers to retrieve, -1 for all
+	 * @return the most recently modified trackers
+	 */
+	List<AnalysisRequestTracker> getRecentTrackers(int offset, int limit);
+	
+	/**
+	 * Clear internal caches, if any.
+	 */
+	void clearCache();
+
+	int getSize();
+
+	AnalysisRequestSummary getRequestSummary();
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java
new file mode 100755
index 0000000..8100f18
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/AsyncDiscoveryServiceWrapper.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.core.Utils;
+
+public class AsyncDiscoveryServiceWrapper implements SyncDiscoveryService {
+
+	AsyncDiscoveryService wrappedService = null;
+
+	public AsyncDiscoveryServiceWrapper(AsyncDiscoveryService wrappedService) {
+		this.wrappedService = wrappedService;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		try {
+			DiscoveryServiceAsyncStartResponse asyncResponse = wrappedService.startAnalysis(request);
+			ResponseCode code = asyncResponse.getCode();
+			if (code != ResponseCode.OK) {
+				DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+				response.setCode(code);
+				response.setDetails(asyncResponse.getDetails());
+				return response;
+			}
+			// poll the async service
+			final long maxWaitTimeSecs = Utils.getEnvironmentProperty("odf.async.max.wait.secs", 10 * 60); // default: 10 minutes
+			final long pollingIntervalMS = Utils.getEnvironmentProperty("odf.async.poll.interval.ms", 1000);
+			long maxPolls = (maxWaitTimeSecs * 1000) / pollingIntervalMS;
+			int pollCounter = 0;
+
+			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+			String runId = asyncResponse.getRunId();
+			while (pollCounter < maxPolls) {
+				Thread.sleep(pollingIntervalMS);
+				DiscoveryServiceAsyncRunStatus status = wrappedService.getStatus(runId);
+				switch (status.getState()) {
+				case NOT_FOUND:
+					// should not happen
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Run ID " + runId + " was not found. This should not have happened.");
+					return response;
+				case ERROR:
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails(status.getDetails());
+					return response;
+				case FINISHED:
+					response.setCode(ResponseCode.OK);
+					response.setDetails(status.getDetails());
+					response.setResult(status.getResult());
+					return response;
+				default:
+					// continue polling
+					pollCounter++;
+				}
+			}
+			response.setCode(ResponseCode.UNKNOWN_ERROR);
+			response.setDetails("Polled Async service for " + maxWaitTimeSecs + " seconds without positive result");
+			return response;
+		} catch (Exception exc) {
+			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+			response.setCode(ResponseCode.UNKNOWN_ERROR);
+			response.setDetails("An unknown error occurred: " + Utils.getExceptionAsString(exc));
+			return response;
+		}
+	}
+
+	public void setExecutorService(ExecutorService executorService) {
+		wrappedService.setExecutorService(executorService);
+	}
+
+	public void setMetadataStore(MetadataStore metadataStore) {
+		wrappedService.setMetadataStore(metadataStore);
+	}
+
+	public void setAnnotationStore(AnnotationStore annotationStore) {
+		wrappedService.setAnnotationStore(annotationStore);
+	}
+
+	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
+		return wrappedService.checkDataSet(dataSetContainer);
+	}
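+
+	// Usage sketch (editorial, not part of the original patch): the wrapper lets the
+	// control center treat every discovery service as synchronous.
+	//
+	//   SyncDiscoveryService sync = new AsyncDiscoveryServiceWrapper(asyncService);
+	//   DiscoveryServiceSyncResponse response = sync.runAnalysis(request);
+	//
+	// runAnalysis() blocks, driving the start/poll protocol until the asynchronous
+	// run finishes, fails, or the configured maximum wait time elapses.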
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java
new file mode 100755
index 0000000..bcd2965
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ConfigChangeQueueProcessor.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
+import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ConfigChangeQueueProcessor implements QueueMessageProcessor {
+
+	Logger logger = Logger.getLogger(ConfigChangeQueueProcessor.class.getName());
+	
+	@Override
+	public void process(ExecutorService executorService, String msg, int partition, long offset) {
+		try {
+			AdminMessage amsg = JSONUtils.fromJSON(msg, AdminMessage.class);
+			if (Type.CONFIGCHANGE.equals(amsg.getAdminMessageType())) {
+				logger.info("Received config change: " + JSONUtils.toJSON(amsg));
+				ODFInternalFactory f = new ODFInternalFactory();
+				ODFConfigurationStorage configStorage = f.create(ODFConfigurationStorage.class);
+				configStorage.onConfigChange(amsg.getConfigUpdateDetails());
+				configStorage.removePendingConfigChange(amsg.getId());
+			}
+		} catch(Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while processing admin message", exc);
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java
new file mode 100755
index 0000000..4ffa195
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ControlCenter.java
@@ -0,0 +1,454 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.api.analysis.*;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.core.Utils;
+
+public class ControlCenter {
+
+	private static final String CLASSNAME = ControlCenter.class.getName();
+	private Logger logger = Logger.getLogger(ControlCenter.class.getName());
+
+	public static final String HEALTH_TEST_DISCOVERY_SERVICE_ID = "odf-health-test-discovery-service-id";
+	public static final String HEALTH_TEST_DATA_SET_ID_PREFIX = "odf-health-test-dummy-data-set-id";
+
+	DiscoveryServiceQueueManager queueManager = null;
+	AnalysisRequestTrackerStore store = null;
+	Environment environment = null;
+	OpenDiscoveryFramework odf;
+
+	public ControlCenter() {
+		ODFInternalFactory f = new ODFInternalFactory();
+		queueManager = f.create(DiscoveryServiceQueueManager.class);
+		store = f.create(AnalysisRequestTrackerStore.class);
+		odf = new ODFFactory().create();
+		environment = f.create(Environment.class);
+	}
+
+	private String createNewRequestId() {
+		return "odf-request-" + UUID.randomUUID().toString() + "_" + System.currentTimeMillis();
+	}
+
+	public DiscoveryServiceQueueManager getQueueManager() {
+		return queueManager;
+	}
+
+	public AnalysisResponse startRequest(AnalysisRequest request) {
+		final String METHODNAME = "startRequest()";
+		logger.entering(CLASSNAME, METHODNAME);
+		AnalysisResponse response = new AnalysisResponse();
+		AnalysisRequest requestWithServiceSequence = null;
+		try {
+			requestWithServiceSequence = JSONUtils.fromJSON(JSONUtils.toJSON(request), AnalysisRequest.class);
+		} catch (JSONException e) {
+			throw new RuntimeException("Error cloning analysis request.");
+		}
+		if ((request.getDiscoveryServiceSequence() == null) || request.getDiscoveryServiceSequence().isEmpty()) {
+			DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+			List<String> discoveryServiceSequence = mapper.getRecommendedDiscoveryServiceSequence();
+			logger.log(Level.INFO, "Using discovery service sequence: " + Utils.joinStrings(discoveryServiceSequence, ','));
+			if (discoveryServiceSequence == null) {
+				response.setId(request.getId());
+				response.setInvalidRequest(true);
+				response.setDetails("No suitable discovery services found to create the requested annotation types.");
+				return response;
+			}
+			requestWithServiceSequence.setDiscoveryServiceSequence(discoveryServiceSequence);
+		}
+		try {
+			//Initialize queues to make sure analysis can be started
+			queueManager.start();
+		} catch (TimeoutException e) {
+			logger.warning("queues could not be started in time");
+		}
+		AnalysisRequestTracker similarTracker = store.findSimilarQueuedRequest(requestWithServiceSequence);
+		if (similarTracker != null) {
+			logger.log(Level.WARNING, "A similar request for the issued one is already in the queue.");
+			logger.log(Level.FINE, "A similar request for the issued one is already in the queue. Original request: {0}, found similar request: {1}",
+					new Object[] { JSONUtils.lazyJSONSerializer(requestWithServiceSequence),
+					JSONUtils.lazyJSONSerializer(similarTracker) });
+		}
+		String newRequestId = createNewRequestId();
+		response.setId(newRequestId);
+		requestWithServiceSequence.setId(newRequestId);
+		AnalysisRequestTracker tracker = createTracker(requestWithServiceSequence, response);
+		// if request is invalid, response was already modified and null is returned
+		if (tracker != null) {
+			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.IN_DISCOVERY_SERVICE_QUEUE);
+			logger.log(Level.FINE, "Starting new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
+			store.store(tracker);
+			logger.log(Level.FINEST, "Stored tracker for new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
+			queueManager.enqueue(tracker);
+			logger.log(Level.FINEST, "Tracker enqueued for new request with ID ''{0}''. Tracker: {1}", new Object[] { newRequestId, JSONUtils.lazyJSONSerializer(tracker) });
+		}
+		logger.exiting(CLASSNAME, METHODNAME);
+		return response;
+	}
+
+	public AnalysisRequestStatus getRequestStatus(String requestId) {
+		final String METHODNAME = "getRequestStatus(String)";
+		logger.entering(CLASSNAME, METHODNAME);
+		AnalysisRequestStatus result = new AnalysisRequestStatus();
+		AnalysisRequestTracker tracker = store.query(requestId);
+		if (tracker == null) {
+			result.setState(AnalysisRequestStatus.State.NOT_FOUND);
+		} else {
+			AnalysisRequestStatus.State state = null;
+			switch (tracker.getStatus()) {
+			case INITIALIZED:
+			case IN_DISCOVERY_SERVICE_QUEUE:
+				state = AnalysisRequestStatus.State.QUEUED;
+				break;
+			case ERROR:
+				state = AnalysisRequestStatus.State.ERROR;
+				break;
+			case DISCOVERY_SERVICE_RUNNING:
+				state = AnalysisRequestStatus.State.ACTIVE;
+				break;
+			case FINISHED:
+				state = AnalysisRequestStatus.State.FINISHED;
+				break;
+			case CANCELLED:
+				state = AnalysisRequestStatus.State.CANCELLED;
+				break;
+			default:
+				break;
+			}
+			result.setState(state);
+			result.setDetails(tracker.getStatusDetails());
+			result.setRequest(tracker.getRequest());
+
+			long totalProcessingTime = 0;
+			long totalQueuingTime = 0;
+			long totalTimeSpentStoringAnnotations = 0;
+
+			List<DiscoveryServiceRequest> requests = new ArrayList<DiscoveryServiceRequest>();
+			for (DiscoveryServiceRequest req : tracker.getDiscoveryServiceRequests()) {
+				DiscoveryServiceRequest copyReq = new DiscoveryServiceRequest();
+				copyReq.setDiscoveryServiceId(req.getDiscoveryServiceId());
+				long putOnQueue = req.getPutOnRequestQueue();
+				long startedProcessing = req.getTakenFromRequestQueue();
+				long finishedProcessing = req.getFinishedProcessing();
+
+				// only count phases that have actually completed
+				totalProcessingTime += (finishedProcessing > 0 ? finishedProcessing - startedProcessing : 0);
+				totalQueuingTime += (startedProcessing > 0 ? startedProcessing - putOnQueue : 0);
+				totalTimeSpentStoringAnnotations += req.getTimeSpentStoringResults();
+
+				copyReq.setFinishedProcessing(finishedProcessing);
+				copyReq.setPutOnRequestQueue(putOnQueue);
+				copyReq.setTakenFromRequestQueue(startedProcessing);
+				requests.add(copyReq);
+			}
+
+			result.setTotalTimeOnQueues(totalQueuingTime);
+			result.setTotalTimeProcessing(totalProcessingTime);
+			result.setTotalTimeStoringAnnotations(totalTimeSpentStoringAnnotations);
+			result.setServiceRequests(requests);
+		}
+		logger.log(Level.FINE, "Returning request status object {0}", JSONUtils.lazyJSONSerializer(result));
+		logger.exiting(CLASSNAME, METHODNAME);
+		return result;
+	}
+
+	public AnalysisCancelResult cancelRequest(String requestId) {
+		final String METHODNAME = "cancelRequest(String)";
+		logger.entering(CLASSNAME, METHODNAME);
+
+		AnalysisCancelResult result = new AnalysisCancelResult();
+		result.setState(AnalysisCancelResult.State.NOT_FOUND);
+
+		AnalysisRequestTracker request = store.query(requestId);
+		//TODO implement cancellation of running instead of only queued requests.
+		if (request != null) {
+			if (TrackerUtil.isCancellable(request)) {
+				request.setStatus(AnalysisRequestTrackerStatus.STATUS.CANCELLED);
+				store.store(request);
+				logger.info("cancelled request with id " + requestId);
+				result.setState(AnalysisCancelResult.State.SUCCESS);
+			} else {
+				logger.log(Level.FINER, "Request ''{0}'' could not be cancelled. State ''{1}'', next request number:. ''{2}''", new Object[]{requestId, request.getStatus(), request.getNextDiscoveryServiceRequest()});
+				result.setState(AnalysisCancelResult.State.INVALID_STATE);
+			}
+		}
+		logger.exiting(CLASSNAME, METHODNAME);
+		return result;
+	}
+
+	private AnalysisRequestTracker createTracker(AnalysisRequest request, AnalysisResponse response) {
+		DiscoveryServiceManager discoveryServiceManager = odf.getDiscoveryServiceManager();
+		List<DiscoveryServiceProperties> registeredServices = new ArrayList<>(discoveryServiceManager.getDiscoveryServicesProperties());
+		registeredServices.add(HealthCheckServiceRuntime.getHealthCheckServiceProperties());
+		String currentUser = this.environment.getCurrentUser();
+
+		/*
+		List<MetaDataObjectReference> datasets = request.getDataSets();
+		
+		if (datasets.size() == 1 && datasets.get(0).getId().startsWith(HEALTH_TEST_DATA_SET_ID_PREFIX)) {
+			// health test mode
+			AnalysisRequestTracker healthTestTracker = new AnalysisRequestTracker();
+			DiscoveryServiceRequest dssr = new DiscoveryServiceRequest();
+			dssr.setOdfRequestId(request.getId());
+			dssr.setDiscoveryServiceId(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
+			String odfUrl = new ODFFactory().create().getSettingsManager().getODFSettings().getOdfUrl();
+			dssr.setOdfUrl(odfUrl);
+			MetaDataObjectReference dsr = datasets.get(0);
+			
+			DataSetContainer dataSetContainer = new DataSetContainer();
+			DataSet oMDataSet = new UnknownDataSet();	
+			oMDataSet.setReference(dsr);
+			dataSetContainer.setDataSet(oMDataSet);
+			
+			dssr.setDataSetContainer(dataSetContainer);
+			dssr.setUser(currentUser);
+			dssr.setAdditionalProperties(request.getAdditionalProperties());
+			healthTestTracker.setDiscoveryServiceRequests(Collections.singletonList(dssr));
+			healthTestTracker.setRequest(request);
+			healthTestTracker.setStatus(STATUS.INITIALIZED);
+			Utils.setCurrentTimeAsLastModified(healthTestTracker);
+			healthTestTracker.setUser(currentUser);
+			response.setDetails("Request is a special health test request.");
+			return healthTestTracker;
+		}
+		*/
+
+		List<DiscoveryServiceRequest> startRequests = new ArrayList<DiscoveryServiceRequest>();
+		List<String> discoveryServiceSequence = request.getDiscoveryServiceSequence();
+		if (discoveryServiceSequence != null && !discoveryServiceSequence.isEmpty()) {
+			logger.log(Level.FINE, "Request issued with fixed discovery service sequence: {0}", discoveryServiceSequence);
+			// first check that all discovery service IDs are valid
+			Set<String> unknownServiceIds = new HashSet<String>(discoveryServiceSequence);
+			for (String ds : discoveryServiceSequence) {
+				for (DiscoveryServiceProperties regInfo : registeredServices) {
+					if (regInfo.getId().equals(ds)) {
+						unknownServiceIds.remove(ds);
+					}
+				}
+			}
+			// any IDs left over do not belong to a registered service
+			if (!unknownServiceIds.isEmpty()) {
+				String msg = MessageFormat.format("The discovery services {0} could not be found", Utils.collectionToString(unknownServiceIds, ","));
+				logger.log(Level.WARNING, msg);
+				response.setInvalidRequest(true);
+				response.setDetails(msg);
+				return null;
+			}
+
+			// for each data set process all discovery services
+			// (possible alternative, not used here: for all discovery services process each data set)
+			for (MetaDataObjectReference dataSetId : request.getDataSets()) {
+				MetaDataObject mdo = null;
+				if (dataSetId.getId().startsWith(HEALTH_TEST_DATA_SET_ID_PREFIX)) {
+					mdo = new UnknownDataSet();
+					mdo.setReference(dataSetId);
+				} else {
+					mdo = odf.getMetadataStore().retrieve(dataSetId);
+				}
+				if (mdo == null) {
+					String msg = MessageFormat.format("The meta data object id ''{0}'' does not reference an existing metadata object. Request will be set to error.", dataSetId.toString());
+					logger.log(Level.WARNING, msg);
+					response.setInvalidRequest(true);
+					response.setDetails(msg);
+					return null;
+				}
+				if (dataSetId.getUrl() == null) {
+					dataSetId.setUrl(mdo.getReference().getUrl());
+				}
+				for (String ds : discoveryServiceSequence) {
+					DiscoveryServiceRequest req = new DiscoveryServiceRequest();
+					DataSetContainer dataSetContainer = new DataSetContainer();
+					dataSetContainer.setDataSet(mdo);
+					req.setDataSetContainer(dataSetContainer);
+					req.setOdfRequestId(request.getId());
+					req.setDiscoveryServiceId(ds);
+					req.setUser(currentUser);
+					req.setAdditionalProperties(request.getAdditionalProperties());
+					String odfUrl = odf.getSettingsManager().getODFSettings().getOdfUrl();
+					req.setOdfUrl(odfUrl);
+					for (DiscoveryServiceProperties dsri : odf.getDiscoveryServiceManager().getDiscoveryServicesProperties()) {
+						if (dsri.getId().equals(ds)) {
+							if (dsri.getEndpoint().getRuntimeName().equals(SparkServiceRuntime.SPARK_RUNTIME_NAME)) {
+								req.setOdfUser(odf.getSettingsManager().getODFSettings().getOdfUser());
+								//Note that the password has to be provided as plain text here because the remote service cannot decrypt it otherwise.
+								//TODO: Consider to provide a temporary secure token instead of the password.
+								req.setOdfPassword(Encryption.decryptText(odf.getSettingsManager().getODFSettings().getOdfPassword()));
+							}
+						}
+					}
+					startRequests.add(req);
+				}
+			}
+		} else {
+			String msg = "The request didn't contain any processing hints. ODF cannot process a request without an analysis sequence.";
+			logger.log(Level.WARNING, msg);
+			response.setInvalidRequest(true);
+			response.setDetails(msg);
+			return null;
+		}
+
+		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
+		tracker.setDiscoveryServiceRequests(startRequests);
+		tracker.setNextDiscoveryServiceRequest(0);
+		tracker.setRequest(request);
+		tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.INITIALIZED);
+		Utils.setCurrentTimeAsLastModified(tracker);
+		tracker.setUser(currentUser);
+		return tracker;
+	}
+	
+	boolean requiresMetaDataCache(DiscoveryService service) {
+		return service instanceof SparkDiscoveryServiceProxy;
+	}
+
+	public static SyncDiscoveryService getDiscoveryServiceProxy(String discoveryServiceId, AnalysisRequest request) {
+		try {
+			ODFInternalFactory factory = new ODFInternalFactory();
+			DiscoveryServiceManager dsm = factory.create(DiscoveryServiceManager.class);
+			DiscoveryServiceProperties serviceProps = null;
+			if (discoveryServiceId.startsWith(HEALTH_TEST_DISCOVERY_SERVICE_ID)) {
+				serviceProps = HealthCheckServiceRuntime.getHealthCheckServiceProperties();
+			} else {
+				serviceProps = dsm.getDiscoveryServiceProperties(discoveryServiceId);
+			}
+			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(discoveryServiceId);
+			if (runtime == null) {
+				throw new RuntimeException(MessageFormat.format("Service runtime for service ''{0}'' was not found.", discoveryServiceId));
+			}
+			DiscoveryService runtimeProxy = runtime.createDiscoveryServiceProxy(serviceProps);
+			SyncDiscoveryService proxy = null;
+			if (runtimeProxy instanceof AsyncDiscoveryService) {
+				proxy = new AsyncDiscoveryServiceWrapper( (AsyncDiscoveryService) runtimeProxy);
+			} else {
+				proxy = (SyncDiscoveryService) runtimeProxy;
+			}
+			proxy.setMetadataStore(factory.create(MetadataStore.class));
+			AnnotationStore as = factory.create(AnnotationStore.class);
+			if (request != null) {
+				as.setAnalysisRun(request.getId());
+			}
+			proxy.setAnnotationStore(as);
+			return proxy;
+		} catch (ServiceNotFoundException exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+
+	/**
+	 * Package-private helper method to be called when the current discovery service has finished
+	 * and the tracker should advance to the next one.
+	 * NOTE: This should only be called once across all nodes, i.e., typically from a Kafka consumer
+	 *       that runs on all nodes with the same consumer group ID.
+	 *
+	 * @param tracker tracker of the analysis request to advance
+	 */
+	void advanceToNextDiscoveryService(final AnalysisRequestTracker tracker) {
+		DiscoveryServiceRequest req = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+		DiscoveryServiceResponse resp = TrackerUtil.getCurrentDiscoveryServiceStartResponse(tracker);
+		String dsRunID = "N/A";
+		if (resp instanceof DiscoveryServiceAsyncStartResponse) {
+			dsRunID = ((DiscoveryServiceAsyncStartResponse) resp).getRunId();
+		}
+		String dsID = req.getDiscoveryServiceId();
+
+		TrackerUtil.moveToNextDiscoveryService(tracker);
+		DiscoveryServiceRequest nextDSReq = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+		if (nextDSReq == null) {
+			logger.log(Level.FINER, "DSWatcher: Run ''{0}'' of DS ''{1}'' was last of request ''{2}'', marking overall request as finished",
+					new Object[] { dsRunID, dsID, tracker.getRequest().getId() });
+			// overall request is finished
+			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.FINISHED);
+			tracker.setStatusDetails("All discovery services ran successfully");
+			
+			// now propagate annotations if configured
+			logger.log(Level.FINE, "Request is finished, checking for annotation propagation");
+			Boolean doPropagation = odf.getSettingsManager().getODFSettings().getEnableAnnotationPropagation();
+			if (Boolean.TRUE.equals(doPropagation)) {
+				TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+				try {
+					transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+						
+						@Override
+						public Object call() throws Exception {
+							AnnotationPropagator ap = odf.getMetadataStore().getAnnotationPropagator();
+							if (ap != null) {
+								logger.log(Level.FINE, "Annotation Propagator exists, running propagation");
+								try {
+									ap.propagateAnnotations(new ODFFactory().create().getAnnotationStore(), tracker.getRequest().getId());
+								} catch(Exception exc) {
+									logger.log(Level.SEVERE, "An unexcepted exception occurred while propagating annotations", exc);
+									tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.ERROR);
+									String msg = MessageFormat.format("An unexpected exception occured while propagating annotations: ''{0}''", Utils.getExceptionAsString(exc));
+									tracker.setStatusDetails(msg);
+								}
+							}
+							return null;
+						}
+					});
+				} catch (Exception e) {
+					// should never happen as exception is handled inside the callable
+					throw new RuntimeException(e);
+				}
+			}
+		} else {
+			logger.log(Level.FINER, "DSWatcher: Run ''{0}'' of DS ''{1}'' was not the last of request ''{2}'', moving over to next request",
+					new Object[] { dsRunID, dsID, tracker.getRequest().getId() });
+			tracker.setStatus(AnalysisRequestTrackerStatus.STATUS.IN_DISCOVERY_SERVICE_QUEUE);
+			queueManager.enqueue(tracker);
+		}
+		Utils.setCurrentTimeAsLastModified(tracker);
+		store.store(tracker);
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java
new file mode 100755
index 0000000..9b16270
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DeclarativeRequestMapper.java
@@ -0,0 +1,279 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import java.text.MessageFormat;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+
+/**
+*
+* Maps a list of {@link AnnotationType} objects to a list of service ids representing concrete discovery
+* services that generate the requested annotation types.
+* 
+* Internally, this class generates a list of all possible combinations of discovery services that may be
+* used to generate the requested annotation types. The combinations are then assessed and ordered by
+* expected execution effort, and the sequence with the least effort is recommended.
+*
+*/
+public class DeclarativeRequestMapper {
+
+	private Logger logger = Logger.getLogger(DeclarativeRequestMapper.class.getName());
+
+	DiscoveryServiceManager dsManager = new ODFFactory().create().getDiscoveryServiceManager();
+	List<DiscoveryServiceProperties> dsPropList = dsManager.getDiscoveryServicesProperties();
+
+	private List<DiscoveryServiceSequence> discoveryServiceSequences = new ArrayList<DiscoveryServiceSequence>();
+
+	public DeclarativeRequestMapper(AnalysisRequest request) {
+		String messageText = "Generating possible discovery service sequences for annotation types {0}.";
+		logger.log(Level.INFO, MessageFormat.format(messageText, request.getAnnotationTypes()));
+
+		this.discoveryServiceSequences = calculateDiscoveryServiceSequences(request.getAnnotationTypes());
+		Collections.sort(this.discoveryServiceSequences, new EffortComparator());
+	}
+
+	/**
+	*
+	* Represents a single discovery service sequence.
+	*
+	*/
+	public class DiscoveryServiceSequence {
+		private LinkedHashSet<String> serviceSequence;
+
+		public DiscoveryServiceSequence() {
+			this.serviceSequence = new LinkedHashSet<String>();
+		}
+
+		public DiscoveryServiceSequence(LinkedHashSet<String> serviceIds) {
+			this.serviceSequence = serviceIds;
+		}
+
+		public LinkedHashSet<String> getServiceSequence() {
+			return this.serviceSequence;
+		}
+
+		public List<String> getServiceSequenceAsList() {
+			return new ArrayList<String>(this.serviceSequence);
+		}
+
+		@Override
+		public boolean equals(Object obj) {
+			if ((obj == null) || !(obj instanceof DiscoveryServiceSequence)) {
+				return false;
+			}
+			return this.getServiceSequence().equals(((DiscoveryServiceSequence) obj).getServiceSequence());
+		}
+
+		// Overriding hashCode to keep it consistent with equals()
+		// (see http://www.javaranch.com/journal/2002/10/equalhash.html)
+		@Override
+		public int hashCode() {
+			return Utils.joinStrings(new ArrayList<String>(this.serviceSequence), ',').hashCode();
+		}
+	}
+
+	/**
+	*
+	* Internal class that estimates the effort for executing a sequence of discovery services.
+	* Should be extended to take runtime statistics into account. 
+	*
+	*/
+	private class EffortComparator implements Comparator<DiscoveryServiceSequence> {
+		public int compare(DiscoveryServiceSequence da1, DiscoveryServiceSequence da2) {
+			if (da1.getServiceSequence().size() < da2.getServiceSequence().size()) {
+				return -1;
+			} else if (da1.getServiceSequence().size() > da2.getServiceSequence().size()) {
+				return 1;
+			} else {
+				return 0;
+			}
+		}
+	}
+
+	/**
+	 * Returns the calculated list of discovery service sequences ordered by the execution effort,
+	 * starting with the sequence that is supposed to cause the minimum execution effort.
+	 *
+	 * @return List of discovery service sequences
+	 */
+	public List<DiscoveryServiceSequence> getDiscoveryServiceSequences() {
+		return this.discoveryServiceSequences;
+	}
+
+	/**
+	 * Returns recommended discovery service sequence, i.e. the one that is supposed to cause the
+	 * minimum execution effort.
+	 *
+	 * @return Discovery service sequence
+	 */
+	public List<String> getRecommendedDiscoveryServiceSequence() {
+		if (!getDiscoveryServiceSequences().isEmpty()) {
+			return new ArrayList<String>(this.discoveryServiceSequences.get(0).getServiceSequence());
+		} else {
+			return null;
+		}
+	}
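+
+	// Usage sketch (editorial, not part of the original patch): a declarative request
+	// names annotation types instead of concrete services; the mapper resolves them.
+	//
+	//   AnalysisRequest request = new AnalysisRequest();
+	//   request.setAnnotationTypes(Arrays.asList("MyAnnotationType")); // hypothetical type
+	//   DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+	//   List<String> sequence = mapper.getRecommendedDiscoveryServiceSequence();
+	//   // sequence is null if no registered service can produce the requested types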
+
+	/**
+	 * Remove all discovery service sequences that contain a specific service id. Use this method
+	 * to update the list of discovery service sequences after a specific discovery service has
+	 * failed and should not be used any more.
+	 *
+	 * @param serviceId Id of discovery service to be removed
+	 * @return Discovery service sequence
+	 */
+	public boolean removeDiscoveryServiceSequences(String serviceId) {
+		boolean serviceRemoved = false;
+		List<DiscoveryServiceSequence> updatedList = new ArrayList<DiscoveryServiceSequence>();
+		updatedList.addAll(this.discoveryServiceSequences);
+		for (DiscoveryServiceSequence sequence : this.discoveryServiceSequences) {
+			if (sequence.getServiceSequence().contains(serviceId)) {
+				updatedList.remove(sequence);
+				serviceRemoved = true;
+			}
+		}
+		this.discoveryServiceSequences = updatedList;
+		return serviceRemoved;
+	}
+
+	/**
+	 * Internal method that determines all possible sequences of discovery services which could be used
+	 * to generate the requested annotation type. Using recursion, all levels of prerequisites are taken
+	 * into account.
+	 *
+	 * @param annotationType Annotation type to be generated
+	 * @return List of discovery service sequences that generate the requested annotation type
+	 */
+	private List<DiscoveryServiceSequence> getDiscoveryServiceSequencesForAnnotationType(String annotationType) {
+		List<DiscoveryServiceSequence> result = new ArrayList<DiscoveryServiceSequence>();
+		for (DiscoveryServiceProperties dsProps : this.dsPropList) {
+			if ((dsProps.getResultingAnnotationTypes() != null) && dsProps.getResultingAnnotationTypes().contains(annotationType)) {
+				DiscoveryServiceSequence da = new DiscoveryServiceSequence();
+				da.getServiceSequence().add(dsProps.getId());
+				List<DiscoveryServiceSequence> discoveryApproachesForService = new ArrayList<DiscoveryServiceSequence>();
+				discoveryApproachesForService.add(da);
+
+				// If there are prerequisite annotation types, also merge their services into the result
+				if ((dsProps.getPrerequisiteAnnotationTypes() != null)
+						&& !dsProps.getPrerequisiteAnnotationTypes().isEmpty()) {
+					discoveryApproachesForService = combineDiscoveryServiceSequences(
+							calculateDiscoveryServiceSequences(dsProps.getPrerequisiteAnnotationTypes()),
+							discoveryApproachesForService);
+				}
+				logger.log(Level.INFO, "Discovery appoaches for annotationType " + annotationType + ":");
+				for (DeclarativeRequestMapper.DiscoveryServiceSequence discoveryApproach : discoveryApproachesForService) {
+					logger.log(Level.INFO,
+							Utils.joinStrings(new ArrayList<String>(discoveryApproach.getServiceSequence()), ','));
+				}
+
+				result.addAll(discoveryApproachesForService);
+			}
+		}
+		return result;
+	}
+
+	/**
+	 * Internal method that combines two lists of discovery service sequences by generating all possible
+	 * combinations of the entries of both lists. The methods avoids duplicate services in each sequence
+	 * and duplicate sequences in the resulting list.
+	 *
+	 * @param originalSequences Original list of discovery service sequences
+	 * @param additionalSequences Second list discovery service sequences
+	 * @return Combined list of discovery service sequences
+	 */
+	private List<DiscoveryServiceSequence> combineDiscoveryServiceSequences(List<DiscoveryServiceSequence> originalSequences, List<DiscoveryServiceSequence> additionalSequences) {
+		// Example scenario for combining service sequences:
+		//
+		// Let's assume a service S that generates two annotation types AT1 and AT2, and S has prerequisite
+		// annotation type AT_P. There are two services P1 and P2 creating annotation type AT_P.
+		// The possible service sequences for generating annotation type AT1 are "P1, S" and "P2, S", same for AT2.
+		//
+		// When requesting a set of annotation types AT1 and AT2, this will result in the following four combinations
+		// which contain several redundancies:
+		// "P1, S, P1, S", "P1, S, P2, S", "P2, S, P1, S", "P2, S, P2, S"
+		// 
+		// This method uses three ways of removing redundancies:
+		//
+		// 1. Given that class DiscoveryServiceSequence internally uses LinkedHashSet, duplicate services are removed from the
+		// service sequences, resulting in: "P1, S", "P1, S, P2", "P2, S, P1", "P2, S"
+		//
+		// 2. Service sequences are only merged if the last service of the additional sequence is not already part of the original
+		// one which results in: "P1, S", "P1, S", "P2, S", "P2, S"
+		// 
+		// 3. Duplicate sequences are ignored, resulting in: "P1, S", "P2, S" which is the final result.  
+
+		List<DiscoveryServiceSequence> discoveryApproaches = new ArrayList<DiscoveryServiceSequence>();
+		for (DiscoveryServiceSequence da1 : originalSequences) {
+			for (DiscoveryServiceSequence da2 : additionalSequences) {
+				DiscoveryServiceSequence da = new DiscoveryServiceSequence();
+				da.getServiceSequence().addAll(da1.getServiceSequence());
+
+				// Add the second list only if its last serviceId is not already part of the first list
+				// (Otherwise unnecessary prerequisite services might be added, because the 2nd list may use different ones)
+				if (!da1.getServiceSequence().contains(da2.getServiceSequenceAsList().get(da2.getServiceSequenceAsList().size() - 1))) {
+					da.getServiceSequence().addAll(da2.getServiceSequence());
+				}
+
+				// Avoid duplicate entries (uses DiscoveryServiceSequence.equals() method)
+				if (!discoveryApproaches.contains(da)) {
+					discoveryApproaches.add(da);
+				}
+			}
+		}
+		return discoveryApproaches;
+	}
+
+	/**
+	 * Internal method that determines all possible sequences of discovery services which could be used
+	 * to generate a set of requested annotation types.
+	 *
+	 * Each discovery service creates one or multiple annotation types and may have prerequisite annotation types.
+	 * As there may be multiple services creating the same annotation type (maybe by using different prerequisite
+	 * annotation types), this may result in complex dependencies. Using recursion, this method iterates through
+	 * all the dependencies in order to calculate a list of all possible sequences of discovery services that could
+	 * be used to calculate the requested annotation types.
+	 * 
+	 * @param annotationTypes List of annotation types to be generated
+	 * @return List of discovery service sequences that generate the requested annotation types
+	 */
+	private List<DiscoveryServiceSequence> calculateDiscoveryServiceSequences(List<String> annotationTypes) {
+		List<DiscoveryServiceSequence> result = null;
+
+		for (String currentType : annotationTypes) {
+			// Calculate discovery sequences for current annotation type
+			List<DiscoveryServiceSequence> additionalDiscoveryApproaches = getDiscoveryServiceSequencesForAnnotationType(currentType);
+			if (result == null) {
+				result = additionalDiscoveryApproaches;
+			} else {
+				// Merge with discovery sequences determined for the previous annotation types in the list 
+				result = combineDiscoveryServiceSequences(result, additionalDiscoveryApproaches);
+			}
+		}
+		return result;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java
new file mode 100755
index 0000000..20b7661
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultStatusQueueStore.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+
+/**
+ * This class is an in-memory store for both request trackers (showing the status of analysis requests) and
+ * annotations. Both trackers and annotations are put on the ODF status queue, which serves as
+ * (a) a "semi"-persistent store ("semi" because Kafka's retention mechanism will eventually delete entries), and
+ * (b) a way to propagate those changes to other ODF nodes.
+ * The annotations and trackers themselves are stored in memory in static variables.
+ * 
+ * This is how it works:
+ * 1. A single consumer thread listens on the status topic
+ * 2. If an incoming status queue entry is a tracker, it is stored in the in-memory tracker store;
+ *    if it is an annotation, it is stored in the in-memory annotation store
+ * 3. Queries for trackers and annotations only go against the in-memory stores
+ * 4. When a check for overaged entries occurs (a check that removes trackers from the store which are older than the queue retention time),
+ *    the annotations for overaged and finished requests are also deleted (see removeOveragedEntries())
+ */
+public class DefaultStatusQueueStore implements AnalysisRequestTrackerStore, AnnotationStore {
+
+	static Logger logger = Logger.getLogger(DefaultStatusQueueStore.class.getName());
+	
+	public static final long IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS = 5000;
+	
+	static Object globalRequestStoreMapLock = new Object();
+	
+	/*
+	 * http://docs.oracle.com/javase/7/docs/api/java/util/LinkedHashMap.html
+	 * 
+	 * A structural modification is any operation that adds or deletes one or more mappings or, in the case of access-ordered linked hash maps, affects iteration order. 
+	 * In insertion-ordered linked hash maps, merely changing the value associated with a key that is already contained in the map is not a structural modification. 
+	 * (In access-ordered linked hash maps, merely querying the map with get is a structural modification.)
+	 */
+	static LinkedHashMap<String, AnalysisRequestTracker> globalRequestStoreMap = new LinkedHashMap<String, AnalysisRequestTracker>();
+	
+	/*
+	 * This map is only used to track whether storing an object was successful.
+	 */
+	static ConcurrentHashMap<String, Boolean> globalStoreSuccessMap = new ConcurrentHashMap<String, Boolean>();
+		
+	private String analysisRun;
+	
+	// simplest implementation for now: just keep a simple list
+	private static List<Annotation> storedAnnotations = new LinkedList<>();
+	private static Object storedAnnotationsLock = new Object();
+
+	/**
+	 * This processor reads trackers from the queue and stores them in the globalRequestStoreMap.
+	 * The thread for this processor is created in the QueueManager implementation.
+	 *
+	 */
+	public static class StatusQueueProcessor implements QueueMessageProcessor {
+		Logger logger = Logger.getLogger(StatusQueueProcessor.class.getName());
+
+		@Override
+		public void process(ExecutorService executorService, String message, int partition, long offset) {
+			StatusQueueEntry sqe = new StatusQueueEntry();
+			try {
+				sqe = JSONUtils.fromJSON(message, StatusQueueEntry.class);
+			} catch (Exception e) {
+				logger.log(Level.WARNING, "Entry in status queue could not be processed", e);
+			}
+			
+			// first handle trackers and / or initial cleanup
+			synchronized (globalRequestStoreMapLock) {
+				if (sqe.getAnalysisRequestTracker() != null) {
+					try {
+						AnalysisRequestTracker tracker = sqe.getAnalysisRequestTracker();
+						String requestID = tracker.getRequest().getId();
+						logger.log(Level.FINEST, "Store status queue: found tracker with id ''{0}'', tracker: {1}", new Object[] { requestID, message });
+						if (tracker.getStatus() == STATUS.FINISHED) {
+							logger.log(Level.INFO, "Request with id ''{0}'' is finished, result: {1}", new Object[] { requestID, message });
+						}
+						// remove the item first so that the put below re-adds it at the end of the insertion-ordered map
+						if (globalRequestStoreMap.containsKey(requestID)) {
+							globalRequestStoreMap.remove(requestID);
+						}
+
+						globalRequestStoreMap.put(requestID, tracker);
+						if (tracker.getRevisionId() != null) {
+							globalStoreSuccessMap.put(tracker.getRevisionId(), true);
+						}
+
+					} catch (Exception e) {
+						logger.log(Level.WARNING, "Tracker entry in status queue could not be processed", e);
+					}
+				} 				
+			}
+			
+			if (sqe.getAnnotation() != null) {
+				Annotation annot = sqe.getAnnotation();
+				logger.log(Level.FINEST, "Received annotation over status queue: ''{0}''", annot.getReference().getId());
+				synchronized (storedAnnotationsLock) {
+					storedAnnotations.add(annot);
+					globalStoreSuccessMap.put(annot.getReference().getId(), true);
+				}
+			}
+
+			removeOveragedEntries();
+		}
+
+	}
+
+	/////////////////////////////////////////////
+	// AnalysisRequestTrackerStore interface implementation
+
+	
+	/*
+	 * This store uses the lastModified timestamp to remove overaged trackers. 
+	 * Therefore, the lastModified timestamp MUST be set before storing anything in order to prevent unwanted removal
+	 */
+	@Override
+	public void store(AnalysisRequestTracker tracker) {
+		String id = tracker.getRequest().getId();
+		logger.fine("Store " + id + " in trackerStore");
+
+		String revId = UUID.randomUUID() + "_" + System.currentTimeMillis();
+		tracker.setRevisionId(revId);
+		globalStoreSuccessMap.put(revId, false);
+		
+		ODFInternalFactory factory = new ODFInternalFactory();
+		DiscoveryServiceQueueManager qm = factory.create(DiscoveryServiceQueueManager.class);
+		// put the tracker onto the status queue; the actual map that is used in query() is filled by the StatusQueueProcessor listening on the status queue
+		StatusQueueEntry sqe = new StatusQueueEntry();
+		sqe.setAnalysisRequestTracker(tracker);
+		qm.enqueueInStatusQueue(sqe);
+		waitUntilEntryArrives(revId);
+	}
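+	/*
+	 * Assumed round trip of store() above (sketch for illustration; all identifiers
+	 * exist in this file):
+	 *
+	 *   store(tracker)
+	 *     -> qm.enqueueInStatusQueue(sqe)              // producer side
+	 *     -> StatusQueueProcessor.process(...)         // consumer side, fills globalRequestStoreMap
+	 *        -> globalStoreSuccessMap.put(revId, true) // signals arrival
+	 *   waitUntilEntryArrives(revId)                   // polls globalStoreSuccessMap until the signal
+	 */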
+
+	private void waitUntilEntryArrives(String entryId) {
+		boolean found = false;
+		int maxNumWaits = 1500;
+		int sleepMS = 20;
+		while (maxNumWaits > 0) {
+			final Boolean storageSuccess = globalStoreSuccessMap.get(entryId);
+			if (storageSuccess != null && storageSuccess == true) {
+				found = true;
+				globalStoreSuccessMap.remove(entryId);
+				break;
+			}
+			try {
+				Thread.sleep(sleepMS);
+			} catch (InterruptedException e) {
+				e.printStackTrace();
+			}
+			maxNumWaits--;
+		}
+		if (!found) {
+			final String message = "The tracker could not be stored within 30 seconds!";
+			logger.warning(message);
+			throw new RuntimeException(message);
+		} else {
+			logger.fine("Tracker stored after " + ((1500 - maxNumWaits) * sleepMS) + " ms");
+		}
+	}
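+	// Note: the polling budget above is maxNumWaits (1500) * sleepMS (20 ms) = 30,000 ms,
+	// which is where the "30 seconds" in the error message comes from.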
+
+	@Override
+	public AnalysisRequestTracker query(String analysisRequestId) {
+		logger.fine("Querying store for " + analysisRequestId);
+		synchronized (globalRequestStoreMapLock) {
+			AnalysisRequestTracker tracker = globalRequestStoreMap.get(analysisRequestId);
+			return tracker;
+		}
+	}
+	
+	@Override
+	public void clearCache() {
+		logger.fine("Clearing store cache");
+		synchronized (globalRequestStoreMapLock) {
+			globalRequestStoreMap.clear();
+		}
+	}
+	
+	private static void removeOveragedEntries(){
+		Set<String> finishedRequests = new HashSet<>();
+		logger.fine("Removing overaged entries from store");
+		synchronized (globalRequestStoreMapLock) {
+			Iterator<Entry<String, AnalysisRequestTracker>> entryIterator = globalRequestStoreMap.entrySet().iterator();
+			long maxRetentionMS = new ODFFactory().create().getSettingsManager().getODFSettings().getMessagingConfiguration().getAnalysisRequestRetentionMs();
+			long currentTimeMS = System.currentTimeMillis();
+			while(entryIterator.hasNext()){
+				Entry<String, AnalysisRequestTracker> entry = entryIterator.next();
+				AnalysisRequestTracker tracker = entry.getValue();
+				if(currentTimeMS - tracker.getLastModified() >= maxRetentionMS){
+					if (tracker.getStatus() == STATUS.FINISHED || tracker.getStatus() == STATUS.ERROR) {
+						finishedRequests.add(tracker.getRequest().getId());
+					}
+					entryIterator.remove();
+					logger.log(Level.INFO, "Removed overaged status tracker with id ''{0}''", new Object[] { entry.getKey() });
+				} else {
+					/*
+					 * Items in a LinkedHashMap are ordered in the way they were put into the map.
+					 * Because of this, if one item is not overaged, all following ones won't be either.
+					 */
+					break;
+				}
+			}
+		}
+		synchronized (storedAnnotationsLock) {
+			ListIterator<Annotation> it = storedAnnotations.listIterator();
+			while (it.hasNext()) {
+				Annotation annot = it.next();
+				if (finishedRequests.contains(annot.getAnalysisRun())) {
+					it.remove();
+				}
+			}
+		}
+	}
+
+	@Override
+	public int getSize() {
+		synchronized (globalRequestStoreMapLock) {
+			return globalRequestStoreMap.keySet().size();
+		}
+	}
+	
+	@Override
+	public AnalysisRequestTracker findSimilarQueuedRequest(AnalysisRequest request) {
+		synchronized (globalRequestStoreMapLock) {
+			for (AnalysisRequestTracker tracker : globalRequestStoreMap.values()) {
+				long startedAfterLimit = System.currentTimeMillis() - IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS;
+				if (TrackerUtil.isAnalysisWaiting(tracker) || 
+						(tracker.getNextDiscoveryServiceRequest() == 0 && tracker.getStatus() == STATUS.DISCOVERY_SERVICE_RUNNING && tracker.getLastModified() >= startedAfterLimit)) {
+					AnalysisRequest otherRequest = tracker.getRequest();
+					List<MetaDataObjectReference> dataSets = request.getDataSets();
+					List<MetaDataObjectReference> otherDataSets = otherRequest.getDataSets();
+					
+					if (otherDataSets.containsAll(dataSets) && tracker.getDiscoveryServiceRequests().get(0).getDiscoveryServiceId().equals(
+							request.getDiscoveryServiceSequence().get(0))) {
+						logger.log(Level.FINEST, "Found similar request for request {0}", new Object[] { request.getId()});
+						return tracker;
+					}
+				}
+			}
+			return null;
+		}
+	}
+
+	
+	@Override
+	public List<AnalysisRequestTracker> getRecentTrackers(int offset, int limit) {
+		if (offset < 0) {
+			throw new RuntimeException("Offset parameter cannot be negative.");
+		}
+		if (limit < -1) {
+			throw new RuntimeException("Limit parameter cannot be smaller than -1.");
+		}
+		synchronized (globalRequestStoreMapLock) {
+			List<AnalysisRequestTracker> arsList = new ArrayList<>();
+			Iterator<Map.Entry<String, AnalysisRequestTracker>> it = globalRequestStoreMap.entrySet().iterator();
+			// filter out health check requests
+			while (it.hasNext()) {
+				AnalysisRequestTracker t = it.next().getValue();
+				if (!t.getRequest().getDataSets().get(0).getId().startsWith(ControlCenter.HEALTH_TEST_DATA_SET_ID_PREFIX)) {
+					arsList.add(t);
+				}
+			}
+			// now pick up to 'limit' many requests from the end, skipping the 'offset' most recent ones
+			List<AnalysisRequestTracker> result = new ArrayList<>();
+			if (arsList.size() > offset) {
+				int startIndex = arsList.size() - offset - limit;
+				if (limit == -1 || startIndex < 0) {
+					startIndex = 0;
+				}
+				int endIndex = arsList.size() - offset - 1;
+				if (endIndex < 0) {
+					endIndex = 0;
+				}
+				for (int i=endIndex ; i>=startIndex; i--) {
+					result.add(arsList.get(i));
+				}
+			}
+			return result;
+		}
+	}
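+	/*
+	 * Worked example of the index arithmetic above (illustration only): with 10 trackers
+	 * in insertion order (index 0 = oldest, 9 = newest), offset = 2 and limit = 3 give
+	 * startIndex = 10 - 2 - 3 = 5 and endIndex = 10 - 2 - 1 = 7, so the result contains
+	 * the trackers at indices 7, 6, 5, i.e. newest first, skipping the two most recent entries.
+	 */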
+	
+	@Override
+	public AnalysisRequestSummary getRequestSummary() {
+		synchronized (globalRequestStoreMapLock) {
+			try {
+				List<AnalysisRequestTracker> recentTrackers = this.getRecentTrackers(0, -1);
+				int totalSuccess = 0;
+				int totalFailure = 0;
+	
+				for (AnalysisRequestTracker tracker : recentTrackers) {
+					if (STATUS.FINISHED.equals(tracker.getStatus())) {
+						totalSuccess++;
+					} else if (STATUS.ERROR.equals(tracker.getStatus())) {
+						totalFailure++;
+					}
+				}
+				return new AnalysisRequestSummary(totalSuccess, totalFailure);
+			} catch (Exception exc) {
+				throw new RuntimeException(exc);
+			}
+		}	
+	}
+
+	/////////////////////////////////////////////
+	// AnnotationStore interface implementation
+	
+	@Override
+	public Properties getProperties() {
+		Properties props = new Properties();
+		props.put(STORE_PROPERTY_TYPE, "DefaultAnnotationStore");
+		props.put(STORE_PROPERTY_ID, getRepositoryId());
+		props.put(STORE_PROPERTY_DESCRIPTION, "A default in-memory implementation of the annotation store storing its results via Kafka");
+		return props;
+	}
+
+	@Override
+	public String getRepositoryId() {
+		return "ODFDefaultAnnotationStore";
+	}
+
+	@Override
+	public ConnectionStatus testConnection() {
+		return ConnectionStatus.OK;
+	}
+
+	@Override
+	public MetaDataObjectReference store(Annotation annotation) {
+		// clone object
+		try {
+			annotation = JSONUtils.cloneJSONObject(annotation);
+		} catch (JSONException e) {
+			logger.log(Level.SEVERE, "Annotation could not be stored because JSON conversion failed.", e);
+			throw new RuntimeException(e);
+		}
+		
+		// create a new reference
+		String annotId = "Annot" + UUID.randomUUID() + "_" + System.currentTimeMillis();
+		logger.log(Level.FINEST, "Storing annotation with ID ''{0}''", annotId);
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId(annotId);
+		ref.setRepositoryId(getRepositoryId());
+		annotation.setReference(ref);
+		if (analysisRun != null) {
+			annotation.setAnalysisRun(analysisRun);
+		}
+		
+		// re-use mechanism from status queue to wait until message has arrived via Kafka
+		globalStoreSuccessMap.put(annotId, false);
+		DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
+		StatusQueueEntry sqe = new StatusQueueEntry();
+		sqe.setAnnotation(annotation);
+		qm.enqueueInStatusQueue(sqe);
+		waitUntilEntryArrives(annotId);
+		return ref;
+	}
+
+	@Override
+	public List<Annotation> getAnnotations(MetaDataObjectReference object, String analysisRequestId) {
+		List<Annotation> results = new ArrayList<>();
+		synchronized (storedAnnotationsLock) {
+			logger.log(Level.FINEST, "Number of annotations stored: ''{0}''", storedAnnotations.size());
+			ListIterator<Annotation> it = storedAnnotations.listIterator();
+			while (it.hasNext()) {
+				Annotation annot = it.next();
+				boolean match = true;
+				if (object != null) {
+					match = match && object.equals(AnnotationStoreUtils.getAnnotatedObject(annot));
+				}
+				if (annot.getAnalysisRun() != null) {
+					// analysisRun is not set for health check and for some of the tests
+					if (analysisRequestId != null) {
+						match &= annot.getAnalysisRun().equals(analysisRequestId);
+					}
+				}
+				if (match) {
+					results.add(annot);
+				}
+			}
+		}
+		logger.log(Level.FINEST, "Number of annotations found for request Id ''{0}'': ''{1}''", new Object[]{analysisRequestId, results.size()});
+		return results;
+	}
+
+	@Override
+	public void setAnalysisRun(String analysisRun) {
+		this.analysisRun = analysisRun;
+	}
+
+	@Override
+	public String getAnalysisRun() {
+		return this.analysisRun;
+	}
+
+	@Override
+	public Annotation retrieveAnnotation(MetaDataObjectReference ref) {
+		synchronized (storedAnnotationsLock) {
+			logger.log(Level.FINEST, "Number of annotations stored: ''{0}''", storedAnnotations.size());
+			ListIterator<Annotation> it = storedAnnotations.listIterator();
+			while (it.hasNext()) {
+				Annotation annot = it.next();
+				if (annot.getReference().equals(ref)) {
+					return annot;
+				}
+			}
+		}
+		return null;
+	}
+
+	@Override
+	public void setStatusOfOldRequest(long cutOffTimestamp, STATUS status, String detailsMessage) {
+		synchronized (globalRequestStoreMapLock) {
+			DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
+			for (AnalysisRequestTracker tracker : globalRequestStoreMap.values()) {
+				if (tracker.getLastModified() < cutOffTimestamp //
+						&& (STATUS.DISCOVERY_SERVICE_RUNNING.equals(tracker.getStatus()) //
+								|| STATUS.IN_DISCOVERY_SERVICE_QUEUE.equals(tracker.getStatus()) //
+								|| STATUS.INITIALIZED.equals(tracker.getStatus()) //
+						)) {
+					// set the tracker in-memory to have the result available immediately
+					tracker.setStatus(status);
+					if (detailsMessage == null) {
+						detailsMessage = "Setting request to " + status + " because it was last modified before " + new Date(cutOffTimestamp);
+					}
+					tracker.setStatusDetails(detailsMessage);
+					// put tracker onto queue
+					StatusQueueEntry sqe = new StatusQueueEntry();
+					sqe.setAnalysisRequestTracker(tracker);
+					qm.enqueueInStatusQueue(sqe);
+				}
+			}
+		}
+		
+	}
+}
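+// Illustrative usage of the AnnotationStore side of this class (sketch; the variable
+// names are made up, the calls match the methods above):
+//
+//   DefaultStatusQueueStore store = new DefaultStatusQueueStore();
+//   MetaDataObjectReference ref = store.store(annotation);   // blocks until the entry
+//                                                            // arrives via the status queue
+//   List<Annotation> found = store.getAnnotations(null, analysisRequestId);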
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java
new file mode 100755
index 0000000..0ea909f
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultThreadManager.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.lang.Thread.State;
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+
+public class DefaultThreadManager implements ThreadManager {
+
+	private Logger logger = Logger.getLogger(DefaultThreadManager.class.getName());
+
+	static Object unmanagedThreadLock = new Object();
+	static Map<String, Thread> unmanagedThreadMap = new HashMap<String, Thread>();
+	static Map<String, ODFRunnable> unmanagedThreadRunnableMap = new HashMap<String, ODFRunnable>();
+	
+	ExecutorService executorService;
+
+	public DefaultThreadManager() {
+	}
+	
+	private boolean isThreadRunning(Thread thread) {
+		return thread.getState() != State.TERMINATED;
+	}
+	
+	private void purgeTerminatedThreads() {
+		List<String> entriesToBeRemoved = new ArrayList<String>();
+		List<String> entriesToBeKept = new ArrayList<String>();
+		for (Map.Entry<String, Thread> entry : unmanagedThreadMap.entrySet()) {
+			if (!isThreadRunning(entry.getValue())) {
+				entriesToBeRemoved.add(entry.getKey());
+			} else {
+				entriesToBeKept.add(entry.getKey());
+			}
+		}
+		for (String id : entriesToBeRemoved) {
+			unmanagedThreadMap.remove(id);
+			unmanagedThreadRunnableMap.remove(id);
+		}
+		logger.finer("Removed finished threads: " + entriesToBeRemoved.toString());
+		logger.finer("Kept unfinished threads: " + entriesToBeKept.toString());
+	}
+	
+	@Override
+	public ThreadStartupResult startUnmanagedThread(final String id, final ODFRunnable runnable) {
+		ThreadStartupResult result = new ThreadStartupResult(id) {
+			@Override
+			public boolean isReady() {
+				synchronized (unmanagedThreadLock) {
+					if (unmanagedThreadRunnableMap.containsKey(id)) {
+						return unmanagedThreadRunnableMap.get(id).isReady();
+					}
+				}
+				return false;
+			}
+		};
+		synchronized (unmanagedThreadLock) {
+			purgeTerminatedThreads();
+			Thread t = unmanagedThreadMap.get(id);
+			if (t != null) {
+				if (isThreadRunning(t)) {
+					return result;
+				}
+			} 
+			runnable.setExecutorService(executorService);
+
+			Thread newThread = new Thread(runnable);
+			result.setNewThreadCreated(true);
+			newThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
+
+				@Override
+				public void uncaughtException(Thread thread, Throwable throwable) {
+					logger.log(Level.WARNING, "Uncaught exception in thread " + id + " - Thread will shut down!", throwable);
+					synchronized (unmanagedThreadLock) {
+						purgeTerminatedThreads();
+					}
+				}
+			});
+
+			newThread.setDaemon(true); // TODO is it a daemon?
+			newThread.start();
+			unmanagedThreadMap.put(id, newThread);
+			unmanagedThreadRunnableMap.put(id,  runnable);
+		}
+		return result;
+	}
+
+	@Override
+	public ThreadStatus.ThreadState getStateOfUnmanagedThread(String id) {
+		synchronized (unmanagedThreadLock) {
+			Thread t = unmanagedThreadMap.get(id);
+			if (t == null) {
+				return ThreadStatus.ThreadState.NON_EXISTENT;
+			}
+			Thread.State ts = t.getState();
+			switch (ts) {
+			case TERMINATED:
+				return ThreadStatus.ThreadState.FINISHED;
+			default:
+				return ThreadStatus.ThreadState.RUNNING;
+			}
+		}
+	}
+
+
+
+	@Override
+	public void setExecutorService(ExecutorService executorService) {
+		this.executorService = executorService;
+	}
+
+	@Override
+	public void shutdownAllUnmanagedThreads() {
+		synchronized (unmanagedThreadLock) {
+			logger.log(Level.INFO, "Shutting down all ODF threads...");
+			for (String id : unmanagedThreadMap.keySet()) {
+				shutdownThreadImpl(id, false);
+			}
+			unmanagedThreadMap.clear();
+			unmanagedThreadRunnableMap.clear();
+			logger.log(Level.INFO, "All ODF threads shutdown");
+			purgeTerminatedThreads();
+		}		
+	}
+	
+	public void shutdownThreads(List<String> names) {
+		synchronized (unmanagedThreadLock) {
+			for (String name : names) {
+				shutdownThreadImpl(name, true);
+			}
+		}		
+	}
+
+	private void shutdownThreadImpl(String id, boolean purge) {
+		Thread t = unmanagedThreadMap.get(id);
+		if (t == null) {
+			return;
+		}
+		ODFRunnable r = unmanagedThreadRunnableMap.get(id);
+		r.cancel();
+		try {
+			Thread.sleep(500);
+		} catch (InterruptedException e1) {
+			e1.printStackTrace();
+		}
+		int max = 60;
+		while (t.getState() != Thread.State.TERMINATED) {
+			if (max == 0) {
+				break;
+			}
+			max--;
+			try {
+				Thread.sleep(1000);
+			} catch (InterruptedException e) {
+				// do nothing
+				e.printStackTrace();
+			}
+		}
+		if (max == 0) {
+			logger.log(Level.WARNING, "Thread {0} did not stop on its own, must be interrupted.", id);
+			t.interrupt();
+		}
+		if (purge) {
+			purgeTerminatedThreads();
+		}
+	}
+
+	@Override
+	public int getNumberOfRunningThreads() {
+		synchronized (unmanagedThreadLock) {
+			int result = 0;
+			for (Thread t : unmanagedThreadMap.values()) {
+				if (isThreadRunning(t)) {
+					result++;
+				}
+			}
+			return result;
+		}
+	}
+
+	@Override
+	public List<ThreadStatus> getThreadManagerStatus() {
+		synchronized (unmanagedThreadLock) {
+			List<ThreadStatus> result = new ArrayList<ThreadStatus>();
+			for (Entry<String, Thread> entry : unmanagedThreadMap.entrySet()) {
+				ThreadStatus status = new ThreadStatus();
+				status.setId(entry.getKey());
+				status.setState(getStateOfUnmanagedThread(entry.getKey()));
+				ODFRunnable odfRunnable = unmanagedThreadRunnableMap.get(entry.getKey());
+				if (odfRunnable != null) {
+					status.setType(odfRunnable.getClass().getName());
+				}
+				result.add(status);
+			}
+
+			return result;
+		}
+	}
+
+	@Override
+	public void waitForThreadsToBeReady(long waitingLimitMs, List<ThreadStartupResult> startedThreads) throws TimeoutException {
+		Set<String> threadsToWaitFor = new HashSet<String>();
+		for (ThreadStartupResult res : startedThreads) {
+			// Only if a new thread was created do we wait for it to be ready.
+			if (res.isNewThreadCreated()) {
+				threadsToWaitFor.add(res.getThreadId());
+			}
+		}
+		if (threadsToWaitFor.isEmpty()) {
+			return;
+		}
+
+		final int msToWait = 200;
+		final long maxPolls = waitingLimitMs / msToWait;
+		int count = 0;
+		while (threadsToWaitFor.size() > 0 && count < maxPolls) {
+			List<String> ready = new ArrayList<String>();
+			List<String> notReady = new ArrayList<String>();
+			for (ThreadStartupResult thr : startedThreads) {
+				if (thr.isReady()) {
+					ready.add(thr.getThreadId());
+					threadsToWaitFor.remove(thr.getThreadId());
+				} else {
+					notReady.add(thr.getThreadId());
+				}
+			}
+
+			logger.fine("Ready: " + ready);
+			logger.fine("NotReady: " + notReady);
+
+			try {
+				Thread.sleep(msToWait);
+			} catch (InterruptedException e) {
+				e.printStackTrace();
+			}
+			count++;
+		}
+		if (count >= maxPolls) {
+			String msg = "Threads " + threadsToWaitFor + " are not ready yet after " + waitingLimitMs + " ms, giving up waiting for them";
+			logger.log(Level.WARNING, msg);
+			throw new TimeoutException(msg);
+		}
+		
+		logger.fine("All threads ready after " + (count * msToWait) + "ms");
+	}
+
+	@Override
+	public ODFRunnable getRunnable(String name) {
+		synchronized (unmanagedThreadLock) {
+			return unmanagedThreadRunnableMap.get(name);
+		}
+	}
+}
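+// Illustrative usage (sketch; "example-runner" and someOdfRunnable are hypothetical,
+// the factory call mirrors the one used in DiscoveryServiceStarter):
+//
+//   ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
+//   ThreadStartupResult res = tm.startUnmanagedThread("example-runner", someOdfRunnable);
+//   tm.waitForThreadsToBeReady(30000, Collections.singletonList(res)); // may throw TimeoutException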
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java
new file mode 100755
index 0000000..0f79e0c
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DefaultTransactionContextExecutor.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.Callable;
+
+/**
+ * The default TransactionContextExecutor runs code in the same thread as the caller.
+ * 
+ */
+public class DefaultTransactionContextExecutor implements TransactionContextExecutor {
+	
+	@Override
+	public Object runInTransactionContext(Callable<Object> callable) throws Exception {
+		return callable.call();
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java
new file mode 100755
index 0000000..dbfb597
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceStarter.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.annotation.InternalAnnotationStoreUtils;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ * This class processes the entries of a discovery service queue and runs the respective discovery service in a separate thread.
+ * 
+ */
+public class DiscoveryServiceStarter implements QueueMessageProcessor {
+
+	private Logger logger = Logger.getLogger(DiscoveryServiceStarter.class.getName());
+
+	AnalysisRequestTrackerStore trackerStore = null;
+	ControlCenter controlCenter = null;
+	Environment environment = null;
+	
+	/**
+	 * Creates the starter and initializes the tracker store, control center, and environment via the internal factory.
+	 */
+	public DiscoveryServiceStarter() {
+		ODFInternalFactory factory = new ODFInternalFactory();
+		trackerStore = factory.create(AnalysisRequestTrackerStore.class);
+		controlCenter = factory.create(ControlCenter.class);
+		environment = factory.create(Environment.class);
+	}
+	
+	private DiscoveryServiceRequest cloneDSRequestAndAddServiceProps(DiscoveryServiceRequest request, boolean requiresMetaDataCache) throws JSONException {
+		DiscoveryServiceRequest clonedRequest = JSONUtils.cloneJSONObject(request);
+		Map<String, Object> additionalProps = clonedRequest.getAdditionalProperties();
+		if (additionalProps == null) {
+			additionalProps = new HashMap<>();
+			clonedRequest.setAdditionalProperties(additionalProps);
+		}
+		// add service specific properties
+		String id = request.getDiscoveryServiceId();
+		Map<String, String> serviceProps = environment.getPropertiesWithPrefix(id);
+		additionalProps.putAll(serviceProps);
+		
+		// add cached metadata objects to request if required
+		if (requiresMetaDataCache) {
+			MetaDataObject mdo = request.getDataSetContainer().getDataSet();
+			MetadataStore mds = new ODFInternalFactory().create(MetadataStore.class);
+			clonedRequest.getDataSetContainer().setMetaDataCache(CachedMetadataStore.retrieveMetaDataCache(mds, mdo));
+		}
+
+		return clonedRequest;
+	}
+
+	
+	/**
+	 * Starts the discovery service for the request taken from the service runtime topic.
+	 */
+	public void process(ExecutorService executorService, String message, int partition, long offset) {
+		AnalysisRequestTracker tracker = null;
+		try {
+			tracker = JSONUtils.fromJSON(message, AnalysisRequestTracker.class);
+			logger.log(Level.FINEST, "DSStarter: received tracker {0}", JSONUtils.lazyJSONSerializer(tracker));
+			// load tracker from store and check if it was cancelled in the meantime
+			AnalysisRequestTracker storedRequest = trackerStore.query(tracker.getRequest().getId());
+
+			if (storedRequest == null || storedRequest.getStatus() != STATUS.CANCELLED) {
+				// set tracker to running
+				tracker.setStatus(STATUS.DISCOVERY_SERVICE_RUNNING);
+				trackerStore.store(tracker);
+				
+				DiscoveryServiceRequest nextRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+				if (nextRequest == null) {
+					logger.log(Level.WARNING, "Request in queue has wrong format");
+					tracker.setStatus(STATUS.ERROR);
+				} else {
+					nextRequest.setTakenFromRequestQueue(System.currentTimeMillis());
+					trackerStore.store(tracker);
+					String dsID = nextRequest.getDiscoveryServiceId();
+					SyncDiscoveryService nextService = ControlCenter.getDiscoveryServiceProxy(dsID, tracker.getRequest());
+					if (nextService == null) {
+						logger.log(Level.WARNING, "Discovery Service ''{0}'' could not be created", dsID);
+						throw new DiscoveryServiceUnreachableException("Java proxy for service with id " + dsID + " could not be created");
+					} else {
+						DataSetContainer ds = nextRequest.getDataSetContainer();
+						DataSetCheckResult checkResult = nextService.checkDataSet(ds);
+						if (checkResult.getDataAccess() == DataSetCheckResult.DataAccess.NotPossible) {
+							String responseDetails = "";
+							if (checkResult.getDetails() != null) {
+								responseDetails = " Reason: " + checkResult.getDetails();
+							}
+							if (tracker.getRequest().isIgnoreDataSetCheck()) {
+								String msg = MessageFormat.format("Discovery service ''{0}'' cannot process data set ''{1}''.{2} - Ignoring and advancing to next service",
+										new Object[]{dsID, ds.getDataSet().getReference(), responseDetails});
+								logger.log(Level.INFO, msg);
+								// check for next queue
+								DiscoveryServiceSyncResponse dummyResponse = new DiscoveryServiceSyncResponse();
+								dummyResponse.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+								dummyResponse.setDetails(msg);
+								TrackerUtil.addDiscoveryServiceStartResponse(tracker, dummyResponse);
+								controlCenter.advanceToNextDiscoveryService(tracker);
+							} else {
+								tracker.setStatus(STATUS.ERROR);
+								String msg = MessageFormat.format("Discovery service ''{0}'' cannot process data set ''{1}''.{2}",
+										new Object[]{dsID, ds.getDataSet().getReference(), responseDetails});
+								tracker.setStatusDetails(msg);
+								logger.log(Level.WARNING, msg);
+							}
+						} else {
+							nextService.setExecutorService(executorService);
+							runServiceInBackground(executorService, tracker, nextRequest, nextService);
+						}
+					}
+				}
+			}
+		} catch (DiscoveryServiceUnreachableException exc) {
+			logger.log(Level.WARNING, "Discovery service could not be started because it is unreachable", exc);
+			if (tracker != null) {
+				tracker.setStatus(STATUS.ERROR);
+				tracker.setStatusDetails(exc.getReason());
+			}
+		} catch (Throwable exc) {
+			logger.log(Level.WARNING, "An error occurred when starting the discovery service", exc);
+			if (tracker != null) {
+				tracker.setStatus(STATUS.ERROR);
+				tracker.setStatusDetails(Utils.getExceptionAsString(exc));
+			}
+		}
+		updateTracker(tracker);
+	}
+
+	
+	class ServiceRunner implements ODFRunnable {
+		AnalysisRequestTracker tracker;
+		DiscoveryServiceRequest nextRequest;
+		SyncDiscoveryService nextService;
+		
+		public ServiceRunner(AnalysisRequestTracker tracker, DiscoveryServiceRequest nextRequest, SyncDiscoveryService nextService) {
+			super();
+			this.tracker = tracker;
+			this.nextRequest = nextRequest;
+			this.nextService = nextService;
+		}
+
+		@Override
+		public void run() {
+			try {
+				runService(tracker, nextRequest, nextService);
+			} catch (Throwable exc) {
+				logger.log(Level.WARNING, "An error occurred when running the discovery service", exc);
+				if (tracker != null) {
+					tracker.setStatus(STATUS.ERROR);
+					tracker.setStatusDetails(Utils.getExceptionAsString(exc));
+				}
+			}
+			updateTracker(tracker);
+		}
+		
+		@Override
+		public void setExecutorService(ExecutorService service) {
+			
+		}
+		
+		@Override
+		public boolean isReady() {
+			return true;
+		}
+		
+		@Override
+		public void cancel() {
+		}
+
+	}
+	
+	
+	private void runServiceInBackground(ExecutorService executorService, final AnalysisRequestTracker tracker, final DiscoveryServiceRequest nextRequest, final SyncDiscoveryService nextService) throws JSONException {
+		String suffix = nextRequest.getDiscoveryServiceId() + "_" + nextRequest.getOdfRequestId() + UUID.randomUUID().toString();
+		String runnerId = "DSRunner_" + suffix;
+		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
+		ServiceRunner serviceRunner = new ServiceRunner(tracker, nextRequest, nextService);
+		tm.setExecutorService(executorService);
+		tm.startUnmanagedThread(runnerId, serviceRunner);
+	}
+	
+	private void runService(AnalysisRequestTracker tracker, DiscoveryServiceRequest nextRequest, SyncDiscoveryService nextService) throws JSONException {
+		DiscoveryServiceResponse response = null;
+		String dsID = nextRequest.getDiscoveryServiceId();
+		boolean requiresAuxObjects = controlCenter.requiresMetaDataCache(nextService);
+		if (nextService instanceof SyncDiscoveryService) {
+			SyncDiscoveryService nextServiceSync = (SyncDiscoveryService) nextService;
+			logger.log(Level.FINER, "Starting synchronous analysis on service {0}", dsID);
+			DiscoveryServiceSyncResponse syncResponse = nextServiceSync.runAnalysis(cloneDSRequestAndAddServiceProps(nextRequest, requiresAuxObjects));
+			nextRequest.setFinishedProcessing(System.currentTimeMillis());
+			// Even if the analysis was concurrently cancelled we store the results, since the service implementation could have done so itself anyway.
+			long before = System.currentTimeMillis();
+			InternalAnnotationStoreUtils.storeDiscoveryServiceResult(syncResponse.getResult(), tracker.getRequest());
+			nextRequest.setTimeSpentStoringResults(System.currentTimeMillis() - before);
+			// remove result to reduce size of response
+			syncResponse.setResult(null);
+			response = syncResponse;
+		} else {
+			throw new RuntimeException("Unknown Java proxy created for service with id " + dsID);
+		}
+
+		// process response
+		if (response.getCode() == null) {
+			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+			String origDetails = response.getDetails();
+			response.setDetails(MessageFormat.format("Discovery service did not return a response code. Assuming error. Original message: {0}", origDetails));
+		}
+		switch (response.getCode()) {
+		case UNKNOWN_ERROR:
+			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
+			tracker.setStatus(STATUS.ERROR);
+			tracker.setStatusDetails(response.getDetails());
+			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unknown error ''{0}'', ''{1}''", new Object[] { response.getCode().name(),
+					response.getDetails(), dsID });
+			break;
+		case NOT_AUTHORIZED:
+			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
+			tracker.setStatus(STATUS.ERROR);
+			tracker.setStatusDetails(response.getDetails());
+			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unauthorized ''{0}'', ''{1}''", new Object[] { response.getCode().name(),
+					response.getDetails(), dsID });
+			break;
+		case TEMPORARILY_UNAVAILABLE:
+			tracker.setStatus(STATUS.IN_DISCOVERY_SERVICE_QUEUE);
+			logger.log(Level.INFO, "Discovery Service ''{2}'' responded that it is unavailable right now ''{0}'', ''{1}''", new Object[] {
+					response.getCode().name(), response.getDetails(), dsID });
+			// requeue and finish immediately
+			controlCenter.getQueueManager().enqueue(tracker);
+			return;
+		case OK:
+			TrackerUtil.addDiscoveryServiceStartResponse(tracker, response);
+			logger.log(Level.FINER, "Synchronous Discovery Service processed request ''{0}'', ''{1}''", new Object[] { response.getCode().name(), response.getDetails() });
+			AnalysisRequestTracker storedTracker = trackerStore.query(tracker.getRequest().getId());
+			//A user could've cancelled the analysis concurrently. In this case, ignore the response and don't overwrite the tracker
+			if (storedTracker != null && storedTracker.getStatus() != STATUS.CANCELLED) {
+				// check for next queue
+				controlCenter.advanceToNextDiscoveryService(tracker);
+			} else {
+				logger.log(Level.FINER, "Not advancing analysis request because it was cancelled!");
+			}
+			break;
+		default:
+			tracker.setStatus(STATUS.ERROR);
+			tracker.setStatusDetails(response.getDetails());
+			logger.log(Level.WARNING, "Discovery Service ''{2}'' responded with an unknown response ''{0}'', ''{1}''", new Object[] {
+					response.getCode().name(), response.getDetails(), dsID });
+			break;
+		}
+	}
+
+	private boolean updateTracker(AnalysisRequestTracker tracker) {
+		boolean cancelled = false;
+		if (tracker != null) {
+			AnalysisRequestTracker storedTracker = trackerStore.query(tracker.getRequest().getId());
+			//A user could've cancelled the analysis concurrently. In this case, ignore the response and don't overwrite the tracker
+			if (storedTracker == null || (! STATUS.CANCELLED.equals(storedTracker.getStatus())) ) {
+				Utils.setCurrentTimeAsLastModified(tracker);
+				trackerStore.store(tracker);
+			} else {
+				cancelled = true;
+				logger.log(Level.FINER, "Not storing analysis tracker changes because it was cancelled!");
+			}
+		}
+		return cancelled;
+	}
+	
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java
new file mode 100755
index 0000000..38e0747
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/DiscoveryServiceUnreachableException.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+public class DiscoveryServiceUnreachableException extends RuntimeException {
+
+	private static final long serialVersionUID = 3581149213306073675L;
+	
+	private String reason;
+
+	public DiscoveryServiceUnreachableException(String reason) {
+		super(reason);
+		this.reason = reason;
+	}
+
+	public String getReason() {
+		return reason;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java
new file mode 100755
index 0000000..4cba0f6
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ExecutorServiceFactory.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+public class ExecutorServiceFactory {
+
+	static Object execServiceLock = new Object();
+	static ExecutorService executorService = null;
+	
+	public ExecutorService createExecutorService() {
+		synchronized (execServiceLock) {
+			if (executorService == null) {
+				executorService = Executors.newCachedThreadPool();
+			}
+		}
+		return executorService;
+	}
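+	// Usage note (illustrative): every caller shares the same process-wide cached
+	// thread pool, e.g.
+	//   ExecutorService es = new ExecutorServiceFactory().createExecutorService();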
+	
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java
new file mode 100755
index 0000000..848a673
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/HealthCheckServiceRuntime.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class HealthCheckServiceRuntime implements ServiceRuntime {
+	public static final String HEALTH_CHECK_RUNTIME_NAME = "HealthCheck";
+
+	@Override
+	public String getName() {
+		return HEALTH_CHECK_RUNTIME_NAME;
+	}
+
+	@Override
+	public long getWaitTimeUntilAvailable() {
+		return 0;
+	}
+
+	@Override
+	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
+		return new SyncDiscoveryServiceBase() {
+			
+			@Override
+			public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+				DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+				response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+				response.setDetails("Health check service finished successfully");
+				return response;
+			}
+		};
+	}
+	
+	public static DiscoveryServiceProperties getHealthCheckServiceProperties() {		
+		DiscoveryServiceProperties props = new DiscoveryServiceProperties();
+		props.setId(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
+		props.setDescription("Health check service");
+		
+		DiscoveryServiceEndpoint ep = new DiscoveryServiceEndpoint();
+		ep.setRuntimeName(HEALTH_CHECK_RUNTIME_NAME);
+		
+		props.setEndpoint(ep);
+		return props;
+	}
+
+	@Override
+	public String getDescription() {
+		return "Internal runtime dedicated to health checks";
+	}
+
+	@Override
+	public void validate(DiscoveryServiceProperties props) throws ValidationException {
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java
new file mode 100755
index 0000000..61a29b1
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/JavaServiceRuntime.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.settings.validation.ImplementationValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.core.Utils;
+
+public class JavaServiceRuntime implements ServiceRuntime {
+
+	Logger logger = Logger.getLogger(JavaServiceRuntime.class.getName());
+
+	public static final String NAME = "Java";
+	
+	@Override
+	public String getName() {
+		return NAME;
+	}
+
+	@Override
+	public long getWaitTimeUntilAvailable() {
+		// for now, always run
+		return 0;
+	}
+
+	@Override
+	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
+		DiscoveryService service = null;
+		String className = null;
+		try {
+			className = JSONUtils.convert(props.getEndpoint(), DiscoveryServiceJavaEndpoint.class).getClassName();
+			Class<?> clazz = Class.forName(className);
+			Object o = clazz.newInstance();
+			service = (DiscoveryService) o;
+		} catch (Exception e) {
+			logger.log(Level.FINE, "An error occurred while instantiating Java implementation", e);
+			logger.log(Level.WARNING, "Java implementation ''{0}'' for discovery service ''{1}'' could not be instantiated (internal error: ''{2}'')",
+					new Object[] { className, props.getId(), e.getMessage() });
+			return null;
+		}
+		if (service instanceof SyncDiscoveryService) {
+			return new TransactionSyncDiscoveryServiceProxy((SyncDiscoveryService) service);
+		} else if (service instanceof AsyncDiscoveryService) {
+			return new TransactionAsyncDiscoveryServiceProxy((AsyncDiscoveryService) service);
+		}
+		return service;
+	}
+
+	@Override
+	public String getDescription() {
+		return "The default Java runtime";
+	}
+
+	@Override
+	public void validate(DiscoveryServiceProperties props) throws ValidationException {
+		DiscoveryServiceJavaEndpoint javaEP;
+		try {
+			javaEP = JSONUtils.convert(props.getEndpoint(), DiscoveryServiceJavaEndpoint.class);
+		} catch (JSONException e) {
+			throw new ValidationException("Endpoint definition for Java service is not correct: " + Utils.getExceptionAsString(e));
+		}
+		new ImplementationValidator().validate("Service.endpoint", javaEP.getClassName());
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java
new file mode 100755
index 0000000..f999ecf
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ODFRunnable.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+
+public interface ODFRunnable extends Runnable {
+
+	void setExecutorService(ExecutorService service);
+	
+	void cancel();
+	
+	// return true if the runnable is likely to be ready to receive data
+	boolean isReady();
+	
+}
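+// Minimal illustrative implementation (hypothetical, for documentation purposes only):
+//
+//   class NoopRunnable implements ODFRunnable {
+//       private volatile boolean cancelled = false;
+//       public void run() { while (!cancelled) { /* poll for work */ } }
+//       public void setExecutorService(ExecutorService service) { /* not needed here */ }
+//       public void cancel() { cancelled = true; }
+//       public boolean isReady() { return true; }
+//   }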
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java
new file mode 100755
index 0000000..e6642c5
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/QueueMessageProcessor.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.ExecutorService;
+
+
+public interface QueueMessageProcessor {
+
+	/**
+	 * Callback to process a message taken from the queue.
+	 * 
+	 * @param executorService Executor service that implementations may use to run work asynchronously
+	 * @param msg The message to be processed
+	 * @param partition The Kafka topic partition this message was read from
+	 * @param msgOffset The offset of this particular message on this Kafka partition
+	 */
+	void process(ExecutorService executorService, String msg, int partition, long msgOffset);
+
+}
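+// Minimal illustrative implementation (hypothetical; not shipped with ODF):
+//
+//   class LoggingProcessor implements QueueMessageProcessor {
+//       public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+//           java.util.logging.Logger.getLogger("odf").info(
+//                   "partition " + partition + ", offset " + msgOffset + ": " + msg);
+//       }
+//   }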
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java
new file mode 100755
index 0000000..da06dd2
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntime.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+
+public interface ServiceRuntime {
+	
+	String getName();
+	
+	/**
+	 * Check if the runtime is currently available for processing.
+	 * Returns a value <= 0 if the runtime is available immediately. A value > 0
+	 * indicates how many seconds to wait before retrying.
+	 *
+	 * Note: If this method returns a value > 0, the Kafka consumer will be shut down and only be
+	 * started again once it returns <= 0. Shutting down and restarting the consumer is
+	 * rather costly, so this should only be done if the runtime won't be accepting requests
+	 * for an extended period of time.
+	 */
+	long getWaitTimeUntilAvailable();
+
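+	/* How the framework is expected to use this value (illustrative sketch;
+	 * 'runtime' and 'props' are placeholders):
+	 *
+	 *   long wait = runtime.getWaitTimeUntilAvailable();
+	 *   if (wait > 0) {
+	 *       // shut down the consumer and check again after 'wait' seconds
+	 *   } else {
+	 *       DiscoveryService proxy = runtime.createDiscoveryServiceProxy(props);
+	 *   }
+	 */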
+	DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props);
+
+	String getDescription();
+	
+	void validate(DiscoveryServiceProperties props) throws ValidationException;
+	
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java
new file mode 100755
index 0000000..a867580
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ServiceRuntimes.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.LineNumberReader;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.engine.ServiceRuntimeInfo;
+import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+
+public class ServiceRuntimes {
+
+	static Logger logger = Logger.getLogger(ServiceRuntimes.class.getName());
+
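+	/* Runtime extensions are discovered via a plain text file on the classpath:
+	 * each line of META-INF/odf/odf-runtimes.txt names one ServiceRuntime
+	 * implementation class, e.g. (hypothetical class name):
+	 *
+	 *   com.example.odf.MyServiceRuntime
+	 */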
+	static List<ServiceRuntime> getRuntimeExtensions() throws IOException {
+		ClassLoader cl = ServiceRuntimes.class.getClassLoader();
+		Enumeration<URL> services = cl.getResources("META-INF/odf/odf-runtimes.txt");
+		List<ServiceRuntime> result = new ArrayList<>();
+		while (services.hasMoreElements()) {
+			URL url = services.nextElement();
+			try (InputStream is = url.openStream();
+					LineNumberReader lnr = new LineNumberReader(new InputStreamReader(is, "UTF-8"))) {
+				String line = null;
+				while ((line = lnr.readLine()) != null) {
+					line = line.trim();
+					logger.log(Level.INFO, "Loading runtime extension ''{0}''", line);
+					try {
+						@SuppressWarnings("unchecked")
+						Class<ServiceRuntime> clazz = (Class<ServiceRuntime>) cl.loadClass(line);
+						ServiceRuntime sr = clazz.newInstance();
+						result.add(sr);
+					} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
+						logger.log(Level.WARNING, MessageFormat.format("Runtime extension of class ''{0}'' could not be instantiated", line), e);
+					}
+				}
+			}
+		logger.log(Level.INFO, "Number of runtime extensions found: {0}", result.size());
+		return result;
+	}
+	
+	static {
+		List<ServiceRuntime> allRuntimes = new ArrayList<>(Arrays.asList( //
+				new HealthCheckServiceRuntime(), //
+				new JavaServiceRuntime(), //
+				new SparkServiceRuntime() //
+		));
+		try {
+			List<ServiceRuntime> runtimeExtensions = getRuntimeExtensions();
+			allRuntimes.addAll(runtimeExtensions);
+		} catch (IOException e) {
+			logger.log(Level.WARNING, "An exception occurred when loading runtime extensions, ignoring them", e);
+		}
+		runtimes = Collections.unmodifiableList(allRuntimes);
+	}
+
+	private static List<ServiceRuntime> runtimes;
+
+	public static List<ServiceRuntime> getActiveRuntimes() {
+		Environment env = new ODFInternalFactory().create(Environment.class);
+		List<String> activeRuntimeNames = env.getActiveRuntimeNames();
+		if (activeRuntimeNames == null) {
+			return getAllRuntimes();
+		}
+		// always add health check runtime
+		Set<String> activeRuntimeNamesSet = new HashSet<>(activeRuntimeNames);
+		activeRuntimeNamesSet.add(HealthCheckServiceRuntime.HEALTH_CHECK_RUNTIME_NAME);
+		List<ServiceRuntime> activeRuntimes = new ArrayList<>();
+		for (ServiceRuntime rt : runtimes) {
+			if (activeRuntimeNamesSet.contains(rt.getName())) {
+				activeRuntimes.add(rt);
+			}
+		}
+		return activeRuntimes;
+	}
+
+	public static List<ServiceRuntime> getAllRuntimes() {
+		return runtimes;
+	}
+
+	public static ServiceRuntime getRuntimeForDiscoveryService(DiscoveryServiceProperties discoveryServiceProps) {
+		DiscoveryServiceEndpoint ep = discoveryServiceProps.getEndpoint();
+		for (ServiceRuntime runtime : getAllRuntimes()) {
+			if (runtime.getName().equals(ep.getRuntimeName())) {
+				return runtime;
+			}
+		}
+		return null;
+	}
+
+	public static ServiceRuntime getRuntimeForDiscoveryService(String discoveryServiceId) {
+		// special check because the health check runtime is not part of the configuration
+		if (discoveryServiceId.startsWith(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID)) {
+			return new HealthCheckServiceRuntime();
+		}
+		DiscoveryServiceManager dsm = new ODFInternalFactory().create(DiscoveryServiceManager.class);
+		try {
+			DiscoveryServiceProperties props = dsm.getDiscoveryServiceProperties(discoveryServiceId);
+			return getRuntimeForDiscoveryService(props);
+		} catch (ServiceNotFoundException e) {
+			return null;
+		}
+	}
+
+	public static ServiceRuntimesInfo getRuntimesInfo(List<ServiceRuntime> runtimes) {
+		List<ServiceRuntimeInfo> rts = new ArrayList<>();
+		for (ServiceRuntime rt : runtimes) {
+			ServiceRuntimeInfo sri = new ServiceRuntimeInfo();
+			sri.setName(rt.getName());
+			sri.setDescription(rt.getDescription());
+			rts.add(sri);
+		}
+		ServiceRuntimesInfo result = new ServiceRuntimesInfo();
+		result.setRuntimes(rts);
+		return result;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java
new file mode 100755
index 0000000..6dc1fd0
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkDiscoveryServiceProxy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.text.MessageFormat;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.core.Utils;
+
+/**
+ * Proxy for calling any type of Spark discovery service.
+ */
+public class SparkDiscoveryServiceProxy implements SyncDiscoveryService {
+	Logger logger = Logger.getLogger(SparkDiscoveryServiceProxy.class.getName());
+
+	protected MetadataStore metadataStore;
+	protected AnnotationStore annotationStore;
+	protected ExecutorService executorService;
+	private DiscoveryServiceProperties dsri;
+
+	public SparkDiscoveryServiceProxy(DiscoveryServiceProperties dsri) {
+		this.dsri = dsri;
+	}
+
+	@Override
+	public void setExecutorService(ExecutorService executorService) {
+		this.executorService = executorService;
+	}
+
+	@Override
+	public void setMetadataStore(MetadataStore metadataStore) {
+		this.metadataStore = metadataStore;
+	}
+
+	@Override
+	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
+		DataSetCheckResult checkResult = new DataSetCheckResult();
+		checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
+		try {
+			SparkServiceExecutor executor = new ODFInternalFactory().create(SparkServiceExecutor.class);
+			checkResult = executor.checkDataSet(this.dsri, dataSetContainer);
+		} catch (Exception e) {
+			logger.log(Level.WARNING,"Error running discovery service.", e);
+			checkResult.setDetails(Utils.getExceptionAsString(e));
+		}
+		return checkResult;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		logger.log(Level.INFO,MessageFormat.format("Starting Spark discovery service ''{0}'', id {1}.", new Object[]{ dsri.getName(), dsri.getId() }));
+		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+		DiscoveryServiceSparkEndpoint endpoint;
+		try {
+			endpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+		} catch (JSONException e1) {
+			throw new RuntimeException(e1);
+		}
+		if ((endpoint.getJar() == null) || (endpoint.getJar().isEmpty())) {
+			response.setDetails("No jar file was provided that implements the Spark application.");
+		} else try {
+			SparkServiceExecutor executor = new ODFInternalFactory().create(SparkServiceExecutor.class);
+			response = executor.runAnalysis(this.dsri, request);
+			logger.log(Level.FINER, "Spark discovery service response: " + response.toString());
+			logger.log(Level.INFO, "Spark discovery service finished.");
+			return response;
+		} catch (Exception e) {
+			logger.log(Level.WARNING,"Error running Spark application: ", e);
+			response.setDetails(Utils.getExceptionAsString(e));
+		}
+		response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+		return response;
+	}
+
+	@Override
+	public void setAnnotationStore(AnnotationStore annotationStore) {
+		this.annotationStore = annotationStore;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java
new file mode 100755
index 0000000..91056b3
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/SparkServiceRuntime.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class SparkServiceRuntime implements ServiceRuntime {
+
+	public static final String SPARK_RUNTIME_NAME = "Spark";
+	
+	@Override
+	public String getName() {
+		return SPARK_RUNTIME_NAME;
+	}
+
+	@Override
+	public long getWaitTimeUntilAvailable() {
+		return 0;
+	}
+
+	@Override
+	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
+		return new SparkDiscoveryServiceProxy(props);
+	}
+
+	@Override
+	public String getDescription() {
+		return "The default Spark runtime";
+	}
+
+	@Override
+	public void validate(DiscoveryServiceProperties props) throws ValidationException {
+		try {
+			JSONUtils.convert(props.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+		} catch (JSONException e1) {
+			throw new ValidationException("Endpoint definition for Spark service is not correct: " + Utils.getExceptionAsString(e1));
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java
new file mode 100755
index 0000000..206a6d0
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/StatusQueueEntry.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+
+// JSON-serializable container for entries on the ODF status queue
+public class StatusQueueEntry {
+
+	private Annotation annotation;
+	private AnalysisRequestTracker analysisRequestTracker;
+
+	public Annotation getAnnotation() {
+		return annotation;
+	}
+
+	public void setAnnotation(Annotation annotation) {
+		this.annotation = annotation;
+	}
+
+	public AnalysisRequestTracker getAnalysisRequestTracker() {
+		return analysisRequestTracker;
+	}
+
+	public void setAnalysisRequestTracker(AnalysisRequestTracker analysisRequestTracker) {
+		this.analysisRequestTracker = analysisRequestTracker;
+	}
+
+	
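+	/* An entry typically carries either an annotation or a tracker; getRequestId()
+	 * resolves the request id from whichever part is set. Illustrative use:
+	 *
+	 *   StatusQueueEntry sqe = new StatusQueueEntry();
+	 *   sqe.setAnalysisRequestTracker(tracker);
+	 *   String requestId = StatusQueueEntry.getRequestId(sqe);
+	 */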
+	public static String getRequestId(StatusQueueEntry seq) {
+		if (seq.getAnnotation() != null) {
+			return seq.getAnnotation().getAnalysisRun();
+		} else if (seq.getAnalysisRequestTracker() != null) {
+			return seq.getAnalysisRequestTracker().getRequest().getId();
+		}
+		return null;
+	}
+
+	
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java
new file mode 100755
index 0000000..33dba10
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/ThreadManager.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+
+public interface ThreadManager {
+
+	void waitForThreadsToBeReady(long waitingLimitMs, List<ThreadStartupResult> startedThreads) throws TimeoutException;
+
+	ThreadStartupResult startUnmanagedThread(String name, ODFRunnable runnable);
+	
+	ThreadStatus.ThreadState getStateOfUnmanagedThread(String name);
+	
+	ODFRunnable getRunnable(String name);
+	
+	void setExecutorService(ExecutorService executorService);
+	
+	void shutdownAllUnmanagedThreads();
+	
+	void shutdownThreads(List<String> names);
+	
+	int getNumberOfRunningThreads();
+
+	List<ThreadStatus> getThreadManagerStatus();
+
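+	/* Typical lifecycle (illustrative sketch; the thread name is hypothetical):
+	 *
+	 *   ThreadStartupResult res = threadManager.startUnmanagedThread("my-thread", runnable);
+	 *   threadManager.waitForThreadsToBeReady(30000, Collections.singletonList(res));
+	 *   // ... later ...
+	 *   threadManager.shutdownThreads(Collections.singletonList("my-thread"));
+	 */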
+	public abstract class ThreadStartupResult {
+
+		private String threadId;
+		private boolean newThreadCreated;
+
+		public ThreadStartupResult(String id) {
+			this.threadId = id;
+		}
+
+		public String getThreadId() {
+			return threadId;
+		}
+
+		public boolean isNewThreadCreated() {
+			return newThreadCreated;
+		}
+
+		public void setNewThreadCreated(boolean newThreadCreated) {
+			this.newThreadCreated = newThreadCreated;
+		}
+
+		public abstract boolean isReady();
+
+	}
+
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java
new file mode 100755
index 0000000..f1c7704
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TrackerUtil.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+
+public class TrackerUtil {
+	
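+	/* The tracker keeps a cursor (nextDiscoveryServiceRequest) into its list of
+	 * discovery service requests; the helpers below read and advance that cursor. */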
+	/**
+	 * @param tracker Tracker of the analysis request to check
+	 * @return true if the first analysis of the tracker has not yet been started
+	 */
+	public static boolean isAnalysisWaiting(AnalysisRequestTracker tracker) {
+		return tracker.getNextDiscoveryServiceRequest() == 0 && (tracker.getStatus() == STATUS.IN_DISCOVERY_SERVICE_QUEUE || tracker.getStatus() == STATUS.INITIALIZED); // || tracker.getStatus() == STATUS.DISCOVERY_SERVICE_RUNNING);
+	}
+	
+	public static boolean isCancellable(AnalysisRequestTracker tracker)  {
+		return (tracker.getStatus() == STATUS.IN_DISCOVERY_SERVICE_QUEUE || tracker.getStatus() == STATUS.INITIALIZED || tracker.getStatus() == STATUS.DISCOVERY_SERVICE_RUNNING);
+	}
+
+	public static DiscoveryServiceRequest getCurrentDiscoveryServiceStartRequest(AnalysisRequestTracker tracker) {
+		int i = tracker.getNextDiscoveryServiceRequest();
+		List<DiscoveryServiceRequest> requests = tracker.getDiscoveryServiceRequests();
+		if (i >= 0 && i < requests.size()) {
+			return requests.get(i);
+		}
+		return null;
+	}
+
+	public static DiscoveryServiceResponse getCurrentDiscoveryServiceStartResponse(AnalysisRequestTracker tracker) {
+		int i = tracker.getNextDiscoveryServiceRequest();
+		List<DiscoveryServiceResponse> responses = tracker.getDiscoveryServiceResponses();
+		if (responses == null || responses.isEmpty()) {
+			return null;
+		}
+		if (i >= 0 && i < responses.size()) {
+			return responses.get(i);
+		}
+		return null;
+	}
+
+	public static void moveToNextDiscoveryService(AnalysisRequestTracker tracker) {
+		int i = tracker.getNextDiscoveryServiceRequest();
+		List<DiscoveryServiceRequest> requests = tracker.getDiscoveryServiceRequests();
+		if (i >= 0 && i < requests.size()) {
+			tracker.setNextDiscoveryServiceRequest(i+1);
+		}
+	}
+
+	public static void addDiscoveryServiceStartResponse(AnalysisRequestTracker tracker, DiscoveryServiceResponse response) {
+		List<DiscoveryServiceResponse> l = tracker.getDiscoveryServiceResponses();
+		if (l == null) {
+			l = new ArrayList<DiscoveryServiceResponse>();
+			tracker.setDiscoveryServiceResponses(l);
+		}
+		l.add(response);
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java
new file mode 100755
index 0000000..1a3de04
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionAsyncDiscoveryServiceProxy.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+
+public class TransactionAsyncDiscoveryServiceProxy implements AsyncDiscoveryService {
+
+	private AsyncDiscoveryService wrappedService;
+
+	public TransactionAsyncDiscoveryServiceProxy(AsyncDiscoveryService wrappedService) {
+		this.wrappedService = wrappedService;
+	}
+
+	public DiscoveryServiceAsyncStartResponse startAnalysis(final DiscoveryServiceRequest request) {
+		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+		try {
+			return (DiscoveryServiceAsyncStartResponse) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+
+				@Override
+				public Object call() throws Exception {
+					return wrappedService.startAnalysis(request);
+				}
+			});
+		} catch (Exception e) {
+			throw new RuntimeException(e);
+		}
+
+	}
+
+	public DiscoveryServiceAsyncRunStatus getStatus(final String runId) {
+		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+		try {
+			return (DiscoveryServiceAsyncRunStatus) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+
+				@Override
+				public Object call() throws Exception {
+					return wrappedService.getStatus(runId);
+				}
+			});
+		} catch (Exception e) {
+			throw new RuntimeException(e);
+		}
+
+	}
+
+	public void setExecutorService(ExecutorService executorService) {
+		wrappedService.setExecutorService(executorService);
+	}
+
+	public void setMetadataStore(MetadataStore metadataStore) {
+		wrappedService.setMetadataStore(metadataStore);
+	}
+
+	public void setAnnotationStore(AnnotationStore annotationStore) {
+		wrappedService.setAnnotationStore(annotationStore);
+	}
+
+	public DataSetCheckResult checkDataSet(final DataSetContainer dataSetContainer) {
+		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+		try {
+			return (DataSetCheckResult) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+
+				@Override
+				public Object call() throws Exception {
+					return wrappedService.checkDataSet(dataSetContainer);
+				}
+			});
+		} catch (Exception e) {
+			throw new RuntimeException(e);
+		}
+
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java
new file mode 100755
index 0000000..6c17686
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionContextExecutor.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Use this interface in the core framework whenever code that accesses the metadata repository
+ * has to be run from an unmanaged thread (typically from the Kafka consumers). The implementation
+ * ensures that the code runs in the correct context (regarding transactions etc.).
+ */
+public interface TransactionContextExecutor {
+	
+	/**
+	 * Run a generic callable in a transaction context. This is deliberately not a generic
+	 * method because some of the underlying infrastructures might not support it.
+	 */
+	Object runInTransactionContext(Callable<Object> callable) throws Exception;
+	
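+	/* Typical use (see the Transaction*DiscoveryServiceProxy classes in this patch):
+	 *
+	 *   Object result = executor.runInTransactionContext(new Callable<Object>() {
+	 *       public Object call() throws Exception {
+	 *           return doMetadataWork(); // hypothetical method accessing the metadata repository
+	 *       }
+	 *   });
+	 */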
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java
new file mode 100755
index 0000000..ec96e96
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/controlcenter/TransactionSyncDiscoveryServiceProxy.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.controlcenter;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+
+public class TransactionSyncDiscoveryServiceProxy implements SyncDiscoveryService {
+
+	private SyncDiscoveryService wrappedService;
+
+	public TransactionSyncDiscoveryServiceProxy(SyncDiscoveryService wrappedService) {
+		this.wrappedService = wrappedService;
+	}
+
+	public DiscoveryServiceSyncResponse runAnalysis(final DiscoveryServiceRequest request) {
+		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+		try {
+			return (DiscoveryServiceSyncResponse) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+
+				@Override
+				public Object call() throws Exception {
+					return wrappedService.runAnalysis(request);
+				}
+			});
+		} catch (Exception e) {
+			throw new RuntimeException(e);
+		}
+
+	}
+
+	public void setExecutorService(ExecutorService executorService) {
+		wrappedService.setExecutorService(executorService);
+	}
+
+	public void setMetadataStore(MetadataStore metadataStore) {
+		wrappedService.setMetadataStore(metadataStore);
+	}
+
+	public void setAnnotationStore(AnnotationStore annotationStore) {
+		wrappedService.setAnnotationStore(annotationStore);
+	}
+
+	public DataSetCheckResult checkDataSet(final DataSetContainer dataSetContainer) {
+		TransactionContextExecutor transactionContextExecutor = new ODFInternalFactory().create(TransactionContextExecutor.class);
+		try {
+			return (DataSetCheckResult) transactionContextExecutor.runInTransactionContext(new Callable<Object>() {
+
+				@Override
+				public Object call() throws Exception {
+					return wrappedService.checkDataSet(dataSetContainer);
+				}
+			});
+		} catch (Exception e) {
+			throw new RuntimeException(e);
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java
new file mode 100755
index 0000000..e7cbc44
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceManagerImpl.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.discoveryservice;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.configuration.ConfigManager;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+
+/**
+ *
+ * External Java API for creating and managing discovery services
+ *
+ */
+public class DiscoveryServiceManagerImpl implements DiscoveryServiceManager {
+	private Logger logger = Logger.getLogger(DiscoveryServiceManagerImpl.class.getName());
+	public ConfigManager configManager;
+
+	public DiscoveryServiceManagerImpl() {
+		configManager = new ODFInternalFactory().create(ConfigManager.class);
+	}
+
+	/**
+	 * Retrieve list of discovery services registered in ODF
+	 * @return List of registered ODF discovery services
+	 */
+	public List<DiscoveryServiceProperties> getDiscoveryServicesProperties() {
+		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServicesProperties");
+		List<DiscoveryServiceProperties> dsProperties = configManager.getConfigContainer().getRegisteredServices();
+		return dsProperties;
+	};
+
+	/**
+	 * Register a new service in ODF
+	 * @param dsProperties Properties of the discovery service to register
+	 * @throws ValidationException Validation of a property failed
+	 */
+	public void createDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "createDiscoveryService");
+		ConfigContainer update = new ConfigContainer();
+		List<DiscoveryServiceProperties> registeredServices = configManager.getConfigContainer().getRegisteredServices();
+		registeredServices.add(dsProperties);
+		update.setRegisteredServices(registeredServices);
+		configManager.updateConfigContainer(update);
+
+
+	};
+
+	/**
+	 * Update configuration of an ODF discovery service
+	 * @param dsProperties Properties of the discovery service to update
+	 */
+	public void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ServiceNotFoundException, ValidationException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "replaceDiscoveryService");
+		String serviceId = dsProperties.getId();
+		deleteDiscoveryService(serviceId);
+		createDiscoveryService(dsProperties);
+	};
+
+	/**
+	 * Remove a registered service from ODF
+	 * @param serviceId Discovery service ID
+	 */
+	public void deleteDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "deleteDiscoveryService");
+		ConfigContainer cc = configManager.getConfigContainer();
+		Iterator<DiscoveryServiceProperties> iterator = cc.getRegisteredServices().iterator();
+		boolean serviceFound = false;
+		while (iterator.hasNext()) {
+			if (iterator.next().getId().equals(serviceId)) {
+				iterator.remove();
+				serviceFound = true;
+			}
+		}
+		if (!serviceFound) {
+			throw new ServiceNotFoundException(serviceId);
+		} else {
+			configManager.updateConfigContainer(cc);
+		}
+	};
+
+	/**
+	 * Retrieve current configuration of a discovery service registered in ODF
+	 * @param serviceId Discovery Service ID
+	 * @return Properties of the service with this ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public DiscoveryServiceProperties getDiscoveryServiceProperties(String serviceId) throws ServiceNotFoundException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceProperties");
+		DiscoveryServiceProperties serviceFound = null;
+		List<DiscoveryServiceProperties> registeredServices;
+		registeredServices = configManager.getConfigContainer().getRegisteredServices();
+		for (DiscoveryServiceProperties service : registeredServices) {
+			if (service.getId().equals(serviceId)) {
+				serviceFound = service;
+				break;
+			}
+		}
+		if (serviceFound == null) {
+			throw new ServiceNotFoundException(serviceId);
+		}
+		return serviceFound;
+	};
+
+	/**
+	 * Retrieve status overview of all discovery services registered in ODF
+	 * @return List of status count maps for all discovery services
+	 */
+	public List<ServiceStatusCount> getDiscoveryServiceStatusOverview() {
+		DiscoveryServiceStatistics stats = new DiscoveryServiceStatistics(new ODFInternalFactory().create(AnalysisRequestTrackerStore.class).getRecentTrackers(0,-1));
+		return stats.getStatusCountPerService();
+	}
+
+	/**
+	 * Retrieve status of a specific discovery service
+	 * @param serviceId Discovery Service ID
+	 * @return Status of the service with this ID
+	 * @throws ServiceNotFoundException A service with this ID is not registered
+	 */
+	public DiscoveryServiceStatus getDiscoveryServiceStatus(String serviceId) throws ServiceNotFoundException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceStatus");
+
+		DiscoveryServiceStatus dsStatus = null;
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		DiscoveryService ds = cc.getDiscoveryServiceProxy(serviceId, null);
+		if (ds == null) {
+			throw new ServiceNotFoundException(serviceId);
+		}
+		dsStatus = new DiscoveryServiceStatus();
+		dsStatus.setStatus(DiscoveryServiceStatus.Status.OK);
+		dsStatus.setMessage(MessageFormat.format("Discovery service ''{0}'' status is OK", serviceId));
+		ServiceStatusCount serviceStatus = null;
+		List<ServiceStatusCount> statusCounts = getDiscoveryServiceStatusOverview();
+		for (ServiceStatusCount cnt : statusCounts) {
+			if (cnt.getId().equals(serviceId)) {
+				serviceStatus = cnt;
+				break;
+			}
+		}
+		if (serviceStatus != null) {
+			dsStatus.setStatusCount(serviceStatus);
+		}
+		return dsStatus;
+	};
+
+	/**
+	 * Retrieve runtime statistics of a specific discovery service
+	 * @param serviceId Discovery Service ID
+	 * @return Runtime statistics of the service with this ID
+	 */
+	public DiscoveryServiceRuntimeStatistics getDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceRuntimeStatistics");
+		DiscoveryServiceRuntimeStatistics dsrs = new DiscoveryServiceRuntimeStatistics();
+		dsrs.setAverageProcessingTimePerItemInMillis(0);   // TODO: implement
+		return dsrs;
+	};
+
+	/**
+	 * Delete runtime statistics of a specific discovery service
+	 * @param serviceId Discovery Service ID
+	 */
+	public void deleteDiscoveryServiceRuntimeStatistics(String serviceId) throws ServiceNotFoundException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "deleteDiscoveryServiceRuntimeStatistics");
+		// TODO: implement
+	};
+
+	/**
+	 * Retrieve picture representing a discovery service
+	 * @param serviceId Discovery Service ID
+	 * @return Input stream for image
+	 */
+	public InputStream getDiscoveryServiceImage(String serviceId) throws ServiceNotFoundException {
+		logger.entering(DiscoveryServiceManager.class.getName(), "getDiscoveryServiceImage");
+		final String defaultImageDir = "org/apache/atlas/odf/images";
+
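+		/* Icon lookup order, as implemented below: 1. classpath resource under META-INF/odf/,
+		 * 2. the bundled default image directory, 3. the icon URL interpreted as an external URL,
+		 * 4. a default image picked deterministically by hashing the service id. */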
+		String imgUrl = null;
+		for (DiscoveryServiceProperties info : configManager.getConfigContainer().getRegisteredServices()) {
+			if (info.getId().equals(serviceId)) {
+				imgUrl = info.getIconUrl();
+				break;
+			}
+		}
+
+		ClassLoader cl = this.getClass().getClassLoader();
+		InputStream is = null;
+		if (imgUrl != null) {
+			is = cl.getResourceAsStream("META-INF/odf/" + imgUrl);
+			if (is == null) {
+				is = cl.getResourceAsStream(defaultImageDir + "/" + imgUrl);
+				if (is == null) {
+					try {
+						is = new URL(imgUrl).openStream();
+					} catch (MalformedURLException e) {
+						logger.log(Level.WARNING, "The specified image url {0} for service {1} is invalid!", new String[] { imgUrl, serviceId });
+					} catch (IOException e) {
+						logger.log(Level.WARNING, "The specified image url {0} for service {1} could not be accessed!", new String[] { imgUrl, serviceId });
+					}
+				}
+			}
+		}
+		if (imgUrl == null || is == null) {
+			//TODO is this correct? maybe we should use a single default image instead of a random one
+			try {
+				is = cl.getResourceAsStream(defaultImageDir);
+				if (is != null) {
+					InputStreamReader r = new InputStreamReader(is);
+					BufferedReader br = new BufferedReader(r);
+					List<String> images = new ArrayList<>();
+					String line = null;
+					while ((line = br.readLine()) != null) {
+						images.add(line);
+					}
+					// return random image
+					int ix = Math.abs(serviceId.hashCode()) % images.size();
+					is = cl.getResourceAsStream(defaultImageDir + "/" + images.get(ix));
+				}
+			} catch (IOException exc) {
+				logger.log(Level.WARNING, "Exception occurred while retrieving random image, ignoring it", exc);
+				is = null;
+			}
+		}
+		return is;
+	};
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java
new file mode 100755
index 0000000..6be0e5a
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/discoveryservice/DiscoveryServiceStatistics.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.discoveryservice;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
+
+public class DiscoveryServiceStatistics {
+
+	private List<AnalysisRequestTracker> requests = new ArrayList<AnalysisRequestTracker>();
+
+	public DiscoveryServiceStatistics(List<AnalysisRequestTracker> requests) {
+		this.requests = requests;
+	}
+
+	public List<ServiceStatusCount> getStatusCountPerService() {
+		List<ServiceStatusCount> result = new ArrayList<ServiceStatusCount>();
+
+		Map<String, LinkedHashMap<STATUS, Integer>> statusMap = new HashMap<String, LinkedHashMap<STATUS, Integer>>();
+
+		for (AnalysisRequestTracker tracker : requests) {
+			int maxDiscoveryServiceRequest = (tracker.getNextDiscoveryServiceRequest() == 0 ? 1 : tracker.getNextDiscoveryServiceRequest());
+			for (int no = 0; no < maxDiscoveryServiceRequest; no++) {
+				STATUS cntStatus = tracker.getStatus();
+
+				//No parallel requests are possible atm -> all requests leading to current one must be finished
+				if (no < maxDiscoveryServiceRequest - 1) {
+					cntStatus = STATUS.FINISHED;
+				}
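+				// Example: with the cursor at 2 and tracker status ACTIVE, request 0 is
+				// counted as FINISHED and request 1 as ACTIVE.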
+
+				DiscoveryServiceRequest req = tracker.getDiscoveryServiceRequests().get(no);
+				LinkedHashMap<STATUS, Integer> cntMap = statusMap.get(req.getDiscoveryServiceId());
+				if (cntMap == null) {
+					cntMap = new LinkedHashMap<STATUS, Integer>();
+					//add 0 default values
+					for (STATUS status : STATUS.values()) {
+						cntMap.put(status, 0);
+					}
+				}
+				Integer val = cntMap.get(cntStatus);
+				val++;
+				cntMap.put(cntStatus, val);
+				statusMap.put(req.getDiscoveryServiceId(), cntMap);
+			}
+		}
+
+		for (String key : statusMap.keySet()) {
+			ServiceStatusCount cnt = new ServiceStatusCount();
+			cnt.setId(key);
+			for (DiscoveryServiceProperties info : new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties()) {
+				if (info.getId().equals(key)) {
+					cnt.setName(info.getName());
+					break;
+				}
+			}
+			cnt.setStatusCountMap(statusMap.get(key));
+			result.add(cnt);
+		}
+
+		return result;
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java
new file mode 100755
index 0000000..d09297a
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/engine/EngineManagerImpl.java
@@ -0,0 +1,222 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.engine;
+
+import java.io.InputStream;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.engine.EngineManager;
+import org.apache.atlas.odf.api.engine.MessagingStatus;
+import org.apache.atlas.odf.api.engine.ODFEngineOptions;
+import org.apache.atlas.odf.api.engine.ODFStatus;
+import org.apache.atlas.odf.api.engine.ODFVersion;
+import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
+import org.apache.atlas.odf.api.engine.SystemHealth;
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.core.ODFInitializer;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.ODFUtils;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage.Type;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+
+/**
+ * External Java API for managing and controlling the ODF engine
+ */
+public class EngineManagerImpl implements EngineManager {
+
+	private Logger logger = Logger.getLogger(EngineManagerImpl.class.getName());
+
+	public EngineManagerImpl() {
+	}
+
+	/**
+	 * Checks the health status of ODF
+	 *
+	 * @return Health status of the ODF engine
+	 */
+	public SystemHealth checkHealthStatus() {
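+		/* The health check submits a synthetic analysis request against the built-in
+		 * health test discovery service and polls its status for up to
+		 * maxNumberOfTimesToPoll * msToSleepBetweenPolls (10 seconds) before giving up. */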
+		SystemHealth health = new SystemHealth();
+		try {
+			AnalysisRequest dummyRequest = new AnalysisRequest();
+			String dataSetID = ControlCenter.HEALTH_TEST_DATA_SET_ID_PREFIX + UUID.randomUUID().toString();
+			MetaDataObjectReference dataSetRef = new MetaDataObjectReference();
+			dataSetRef.setId(dataSetID);
+			dummyRequest.setDataSets(Collections.singletonList(dataSetRef));
+			List<String> discoveryServiceSequence = new ArrayList<String>();
+			discoveryServiceSequence.add(ControlCenter.HEALTH_TEST_DISCOVERY_SERVICE_ID);
+			dummyRequest.setDiscoveryServiceSequence(discoveryServiceSequence);
+
+			AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+			AnalysisResponse resp = analysisManager.runAnalysis(dummyRequest);
+			String reqId = resp.getId();
+			AnalysisRequestStatus status = null;
+			final int maxNumberOfTimesToPoll = 500;
+			int count = 0;
+			int msToSleepBetweenPolls = 20;
+			boolean continuePolling = false;
+			do {
+				status = analysisManager.getAnalysisRequestStatus(reqId);
+				continuePolling = (status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.NOT_FOUND) && count < maxNumberOfTimesToPoll;
+				if (continuePolling) {
+					count++;
+					Thread.sleep(msToSleepBetweenPolls);
+				}
+			} while (continuePolling);
+			logger.log(Level.INFO, "Health check request ''{3}'' has status ''{0}'', time spent: {2}ms details ''{1}''", new Object[] { status.getState(), status.getDetails(),
+					count * msToSleepBetweenPolls, reqId });
+			health.getMessages().add(MessageFormat.format("Details message: {0}", status.getDetails()));
+			if (count >= maxNumberOfTimesToPoll) {
+				health.setStatus(SystemHealth.HealthStatus.WARNING);
+				String msg = MessageFormat.format("Health test request could not be processed in time ({0}ms)", (maxNumberOfTimesToPoll * msToSleepBetweenPolls));
+				logger.log(Level.INFO, msg);
+				health.getMessages().add(msg);
+			} else {
+				switch (status.getState()) {
+				case NOT_FOUND:
+					health.setStatus(SystemHealth.HealthStatus.ERROR);
+					health.getMessages().add(MessageFormat.format("Request ID ''{0}'' got lost", reqId));
+					break;
+				case ERROR:
+					health.setStatus(SystemHealth.HealthStatus.ERROR);
+					break;
+				case FINISHED:
+					health.setStatus(SystemHealth.HealthStatus.OK);
+					break;
+				default:
+					health.setStatus(SystemHealth.HealthStatus.ERROR);
+				}
+			}
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An unknown error occurred", exc);
+			health.setStatus(SystemHealth.HealthStatus.ERROR);
+			health.getMessages().add(Utils.getExceptionAsString(exc));
+		}
+		return health;
+	}
+
+	/**
+	 * Returns the status of the ODF thread manager
+	 *
+	 * @return Status of all threads making up the ODF thread manager
+	 */
+	public List<ThreadStatus> getThreadManagerStatus() {
+		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
+		return tm.getThreadManagerStatus();
+	}
+
+	/**
+	 * Returns the status of the ODF messaging subsystem
+	 *
+	 * @return Status of the ODF messaging subsystem
+	 */
+	public MessagingStatus getMessagingStatus() {
+		return new ODFInternalFactory().create(DiscoveryServiceQueueManager.class).getMessagingStatus();
+	}
+
+	/**
+	 * Returns the status of the messaging subsystem and the internal thread manager
+	 *
+	 * @return Combined status of the messaging subsystem and the internal thread manager
+	 */
+	public ODFStatus getStatus() {
+		ODFStatus status = new ODFStatus();
+		status.setMessagingStatus(this.getMessagingStatus());
+		status.setThreadManagerStatus(this.getThreadManagerStatus());
+		return status;
+	}
+
+	/**
+	 * Returns the current ODF version
+	 *
+	 * @return ODF version identifier
+	 */
+	public ODFVersion getVersion() {
+		InputStream is = ODFUtils.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/odfversion.txt");
+		ODFVersion version = new ODFVersion();
+		if (is == null) {
+			version.setVersion("NOTFOUND");
+		} else {
+			version.setVersion(Utils.getInputStreamAsString(is, "UTF-8").trim());
+		}
+		return version;
+	}
+
+	/**
+	 * Shuts down the ODF engine, purges all scheduled analysis requests from the queues, and cancels all running analysis requests.
+	 * This means that all running jobs will be cancelled or their results will not be reported back.
+	 * (for debugging purposes only)
+	 *
+	 * @param options Option for immediately restarting the engine after shutdown (default is not to restart immediately but only when needed)
+	 */
+	public void shutdown(ODFEngineOptions options) {
+		long currentTime = System.currentTimeMillis();
+
+		ControlCenter controlCenter = new ODFInternalFactory().create(ControlCenter.class);
+		AdminMessage shutDownMessage = new AdminMessage();
+		Type t = Type.SHUTDOWN;
+		if (options.isRestart()) {
+			t = Type.RESTART;
+		}
+		shutDownMessage.setAdminMessageType(t);
+		String detailMsg = MessageFormat.format("Shutdown was requested on {0} via ODF API", new Object[] { new Date() });
+		shutDownMessage.setDetails(detailMsg);
+		logger.log(Level.INFO, detailMsg);
+		controlCenter.getQueueManager().enqueueInAdminQueue(shutDownMessage);
+		int maxPolls = 60;
+		int counter = 0;
+		int timeBetweenPollsMs = 1000;
+		while (counter < maxPolls && ODFInitializer.getLastStopTimestamp() <= currentTime) {
+			try {
+				Thread.sleep(timeBetweenPollsMs);
+			} catch (InterruptedException e) {
+				logger.log(Level.WARNING, "Waiting for engine shutdown was interrupted", e);
+			}
+			counter++;
+		}
+		long timeWaited = ((counter * timeBetweenPollsMs) / 1000);
+		logger.log(Level.INFO, "Waited for {0} seconds for shutdown", timeWaited);
+		if (counter >= maxPolls) {
+			logger.log(Level.WARNING, "Waited for shutdown too long. Continuing.");
+		} else {
+			logger.log(Level.INFO, "Shutdown issued successfully");
+		}
+	}
+
+	@Override
+	public ServiceRuntimesInfo getRuntimesInfo() {
+		return ServiceRuntimes.getRuntimesInfo(ServiceRuntimes.getAllRuntimes());
+	}
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java
new file mode 100755
index 0000000..9177556
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DefaultMessageEncryption.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging;
+
+/**
+ * Default encryption: no encryption
+ * 
+ */
+public class DefaultMessageEncryption implements MessageEncryption {
+	
+	@Override
+	public String encrypt(String message) {
+		return message;
+	}
+
+	@Override
+	public String decrypt(String message) {
+		return message;
+	}
+
+
+	/*
+	// this used to be our default encryption; kept for reference.
+	// Note: it requires imports of javax.xml.bind.DatatypeConverter and java.io.UnsupportedEncodingException.
+	@Override
+	public String encrypt(String message) {
+		try {
+			return DatatypeConverter.printBase64Binary(message.getBytes("UTF-8"));
+		} catch (UnsupportedEncodingException e) {
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Override
+	public String decrypt(String message)  {
+		try {
+			return new String(DatatypeConverter.parseBase64Binary(message), "UTF-8");
+		} catch (UnsupportedEncodingException e) {
+			throw new RuntimeException(e);
+		}
+	}
+	*/
+}
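Editor's note: for reference, a reversible drop-in alternative could look like the following sketch. This is an illustration only: it uses java.util.Base64 (Java 8+) in place of the javax.xml.bind.DatatypeConverter of the commented-out variant, the class name is made up, and Base64 is an encoding rather than encryption, so it is no more secure than the default.

package org.apache.atlas.odf.core.messaging;

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64MessageEncryption implements MessageEncryption {

	@Override
	public String encrypt(String message) {
		// Encodes rather than encrypts: this merely makes messages non-human-readable.
		return Base64.getEncoder().encodeToString(message.getBytes(StandardCharsets.UTF_8));
	}

	@Override
	public String decrypt(String message) {
		return new String(Base64.getDecoder().decode(message), StandardCharsets.UTF_8);
	}
}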
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java
new file mode 100755
index 0000000..d2d84dd
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/DiscoveryServiceQueueManager.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging;
+
+import java.util.concurrent.TimeoutException;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.engine.MessagingStatus;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage;
+import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
+
+
+
+public interface DiscoveryServiceQueueManager {
+	
+	void start() throws TimeoutException;
+	
+	void stop() throws TimeoutException;
+		
+	// find the next queue where this tracker should go and put it there
+	void enqueue(AnalysisRequestTracker tracker);
+	
+	void enqueueInStatusQueue(StatusQueueEntry sqe);
+	
+	void enqueueInAdminQueue(AdminMessage message);
+	
+	MessagingStatus getMessagingStatus();
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java
new file mode 100755
index 0000000..ad1bf28
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/messaging/MessageEncryption.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging;
+
+public interface MessageEncryption {
+	String encrypt(String message);
+
+	String decrypt(String message);
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java
new file mode 100755
index 0000000..c71ba3c
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/DefaultMetadataStore.java
@@ -0,0 +1,381 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.AnnotationPropagator;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.metadata.DefaultMetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ * In-memory implementation of MetadataStore interface to be used for testing as
+ * well as for single-node ODF deployments. Uses static HashMaps for storing the
+ * metadata types and objects.
+ * 
+ * 
+ */
+public class DefaultMetadataStore extends WritableMetadataStoreBase implements WritableMetadataStore {
+	private Logger logger = Logger.getLogger(DefaultMetadataStore.class.getName());
+
+	private static final String METADATA_STORE_ID = "ODF_LOCAL_METADATA_STORE";
+	private static final String STORE_PROPERTY_TYPE = "default";
+	private static final String STORE_PROPERTY_DESCRIPTION = "ODF local metadata store";
+
+	private static HashMap<String, String> typeStore;
+	private static HashMap<String, StoredMetaDataObject> objectStore;
+	protected LinkedHashMap<String, StoredMetaDataObject> stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
+	private static boolean isInitialized = false;
+	protected static Object accessLock = new Object();
+	static Object initializationLock = new Object();
+
+	public DefaultMetadataStore() {
+		synchronized (initializationLock) {
+			if (!isInitialized) {
+				isInitialized = true;
+				this.resetAllData();
+			}
+		}
+	}
+
+	protected WritableMetadataStore getMetadataStore() {
+		return this;
+	}
+
+	protected Object getAccessLock() {
+		return accessLock;
+	}
+
+	protected HashMap<String, StoredMetaDataObject> getObjects() {
+		return objectStore;
+	}
+
+	protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects() {
+		return stagedObjects;
+	}
+
+	@Override
+	public ConnectionInfo getConnectionInfo(MetaDataObject informationAsset) {
+		synchronized (accessLock) {
+			return WritableMetadataStoreUtils.getConnectionInfo(this, informationAsset);
+		}
+	}
+
+	@Override
+	public void resetAllData() {
+		logger.log(Level.INFO, "Resetting all data in metadata store.");
+		synchronized (accessLock) {
+			typeStore = new HashMap<String, String>();
+			objectStore = new HashMap<String, StoredMetaDataObject>();
+			createTypes(WritableMetadataStoreUtils.getBaseTypes());
+		}
+	}
+
+	@Override
+	public Properties getProperties() {
+		Properties props = new Properties();
+		props.put(MetadataStore.STORE_PROPERTY_DESCRIPTION, STORE_PROPERTY_DESCRIPTION);
+		props.put(MetadataStore.STORE_PROPERTY_TYPE, STORE_PROPERTY_TYPE);
+		props.put(STORE_PROPERTY_ID, METADATA_STORE_ID);
+		return props;
+	}
+
+	@Override
+	public String getRepositoryId() {
+		return METADATA_STORE_ID;
+	}
+
+	@Override
+	public List<MetaDataObjectReference> search(String query) {
+		if ((query == null) || query.isEmpty()) {
+			throw new MetadataStoreException("The search term cannot be null or empty.");
+		}
+		logger.log(Level.INFO, MessageFormat.format("Processing query \"{0}\".", query));
+		synchronized (accessLock) {
+			LinkedList<String> queryElements = new LinkedList<String>();
+			for (String el : query.split(DefaultMetadataQueryBuilder.SEPARATOR_STRING)) {
+				queryElements.add(el);
+			}
+			List<MetaDataObjectReference> result = new ArrayList<MetaDataObjectReference>();
+			String firstOperator = queryElements.removeFirst();
+
+			if (firstOperator.equals(DefaultMetadataQueryBuilder.DATASET_IDENTIFIER)) {
+				String requestedObjectType = queryElements.removeFirst();
+				for (StoredMetaDataObject currentInternalObject : getObjects().values()) {
+					MetaDataObject currentObject = currentInternalObject.getMetaDataObject();
+					String currentObjectType = getObjectType(currentObject);
+					try {
+						if (isSubTypeOf(requestedObjectType, currentObjectType)
+								&& isConditionMet(currentObject, queryElements)) {
+							result.add(currentObject.getReference());
+						}
+					} catch (IllegalArgumentException | IllegalAccessException e) {
+						throw new MetadataStoreException(
+								MessageFormat.format("Error processing \"{0}\" clause of query.",
+										DefaultMetadataQueryBuilder.DATASET_IDENTIFIER), e);
+					}
+				}
+				return result;
+			} else {
+				throw new MetadataStoreException(MessageFormat.format("Query ''{0}'' is not valid.", query));
+			}
+		}
+	}
+
+	@Override
+	public void createSampleData() {
+		logger.log(Level.INFO, "Creating sample data in metadata store.");
+		SampleDataHelper.copySampleFiles();
+		WritableMetadataStoreUtils.createSampleDataObjects(this);
+	}
+
+	@Override
+	public AnnotationPropagator getAnnotationPropagator() {
+		return new AnnotationPropagator() {
+
+			@Override
+			public void propagateAnnotations(AnnotationStore as, String requestId) {
+				List<Annotation> annotations = as.getAnnotations(null, requestId);
+				for (Annotation annot : annotations) {
+					ensureAnnotationTypeExists(annot);
+					annot.setReference(null); // Set reference to null because a new reference will be generated by the metadata store
+					getMetadataStore().createObject(annot);
+					commit();
+				}
+			}
+		};
+	}
+
+	/**
+	 * Internal helper that creates a list of types in the metadata store.
+	 *
+	 * @param typeList List of types to be created
+	 */
+	private void createTypes(List<Class<?>> typeList) {
+		synchronized (accessLock) {
+			for (Class<?> type : typeList) {
+				if (!typeStore.containsKey(type.getSimpleName())) {
+					logger.log(Level.INFO,
+							MessageFormat.format("Creating new type \"{0}\" in metadata store.", type.getSimpleName()));
+					typeStore.put(type.getSimpleName(), type.getSuperclass().getSimpleName());
+				} else {
+					throw new MetadataStoreException(MessageFormat.format(
+							"A type with the name \"{0}\" already exists in this metadata store.", type.getName()));
+				}
+			}
+		}
+	}
+
+	/**
+	 * Internal helper that returns the type name of a given metadata object.
+	 *
+	 * @param mdo Metadata object
+	 * @return Type name 
+	 */
+	protected String getObjectType(MetaDataObject mdo) {
+		if (mdo instanceof Annotation) {
+			// Important when using the MetadataStore as an AnnotationStore
+			return ((Annotation) mdo).getAnnotationType();
+		} else {
+			return mdo.getClass().getSimpleName();
+		}
+	}
+
+	/**
+	 * Internal helper that checks if a type is the same as, or a sub type of, another type
+	 * by walking up the type hierarchy stored in the type store.
+	 *
+	 * @param superTypeName Name of the type that is supposed to be the super type
+	 * @param typeName Name of the type that is supposed to be the same type or a sub type
+	 */
+	private boolean isSubTypeOf(String superTypeName, String typeName) {
+		if (typeName.equals(superTypeName)) {
+			return true;
+		}
+		String parent = typeStore.get(typeName);
+		if ((parent != null) && (!parent.equals(typeName))) {
+			return isSubTypeOf(superTypeName, parent);
+		}
+		return false;
+	}
+
+	/**
+	 * Internal helper that checks if the attributes of a given metadata object meet a given condition. 
+	 *
+	 * @param mdo Metadata object
+	 * @param condition List of tokens that make up the condition phrase
+	 */
+	private boolean isConditionMet(MetaDataObject mdo, LinkedList<String> condition)
+			throws IllegalArgumentException, IllegalAccessException {
+		if (condition.isEmpty()) {
+			return true;
+		}
+		LinkedList<String> clonedCondition = new LinkedList<String>();
+		clonedCondition.addAll(condition);
+		try {
+			JSONObject mdoJson = JSONUtils.toJSONObject(mdo);
+			logger.log(Level.FINER, MessageFormat.format("Evaluating object \"{0}\".", mdoJson));
+			while (clonedCondition.size() >= 4) {
+				// Each condition clause consists of four elements, e.g. "where
+				// name = 'BankClientsShort'" or "and name = 'BankClientsShort'"
+				String operator = clonedCondition.removeFirst();
+				String attribute = clonedCondition.removeFirst();
+				String comparator = clonedCondition.removeFirst();
+				String expectedValueWithQuotes = clonedCondition.removeFirst();
+				while ((!expectedValueWithQuotes.endsWith(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) && (clonedCondition.size() != 0)) {
+					expectedValueWithQuotes = expectedValueWithQuotes + DefaultMetadataQueryBuilder.SEPARATOR_STRING + clonedCondition.removeFirst();
+				}
+				if (operator.equals(DefaultMetadataQueryBuilder.CONDITION_PREFIX)
+						|| operator.equals(DefaultMetadataQueryBuilder.AND_IDENTIFIER)) {
+					if (mdoJson.containsKey(attribute)) {
+						String actualValue = (mdoJson.get(attribute) != null) ? mdoJson.get(attribute).toString() : null;
+						if (comparator.equals(DefaultMetadataQueryBuilder.EQUALS_IDENTIFIER)) {
+							if (!expectedValueWithQuotes.equals(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER + actualValue + DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) {
+								// Condition is not met
+								return false;
+							}
+						} else if (comparator.equals(DefaultMetadataQueryBuilder.NOT_EQUALS_IDENTIFIER)) {
+							if (expectedValueWithQuotes.equals(DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER + actualValue + DefaultMetadataQueryBuilder.QUOTE_IDENTIFIER)) {
+								// Condition is not met
+								return false;
+							}
+						} else {
+							throw new MetadataStoreException(
+									MessageFormat.format("Unknown comparator \"{0}\" in query condition \"{1}\".",
+											new Object[] { comparator, condition.toString() }));
+						}
+					} else {
+						logger.log(Level.INFO,
+								MessageFormat.format("The object does not contain attribute \"{0}\".", attribute));
+						// Condition is not met
+						return false;
+					}
+				} else {
+					throw new MetadataStoreException(
+							MessageFormat.format("Syntax error in query condition \"{0}\".", condition.toString()));
+				}
+			}
+			if (clonedCondition.size() != 0) {
+				throw new MetadataStoreException(
+						MessageFormat.format("Error parsing trailing query elements \"{0}\".", clonedCondition));
+			}
+			// All conditions are met
+			return true;
+		} catch (JSONException e) {
+			throw new MetadataStoreException(MessageFormat.format("Error parsing JSON object {0} in query.", mdo), e);
+		}
+	}
+
+	/**
+	 * Internal helper that merges the references of a staged metadata object with the references of the current metadata object
+	 * stored in the metadata store. The missing references are added to the provided object in place.
+	 *
+	 * @param object Internal representation of a staged metadata object
+	 */
+	private void mergeReferenceMap(StoredMetaDataObject object) {
+		HashMap<String, List<MetaDataObjectReference>> mergedObjectRefMap = new HashMap<String, List<MetaDataObjectReference>>();
+		String objectId = object.getMetaDataObject().getReference().getId();
+		if (getObjects().get(objectId) != null) {
+			// Only merge if the object already exists in the metadata store
+			HashMap<String, List<MetaDataObjectReference>> originalRefMap = getObjects().get(objectId)
+					.getReferenceMap(); // Get reference map of existing object
+			HashMap<String, List<MetaDataObjectReference>> updatedObjectRefMap = object.getReferenceMap();
+			for (String referenceId : updatedObjectRefMap.keySet()) {
+				// Update original reference map in place
+				mergedObjectRefMap.put(referenceId,
+						InternalMetaDataUtils.mergeReferenceLists(originalRefMap.get(referenceId), updatedObjectRefMap.get(referenceId)));
+			}
+			object.setReferencesMap(mergedObjectRefMap);
+		}
+	}
+
+	@Override
+	public void commit() {
+		synchronized (accessLock) {
+			// Check if all required types exist BEFORE starting to create the
+			// objects in order to avoid partial creation of objects
+			for (Map.Entry<String, StoredMetaDataObject> mapEntry : this.stagedObjects.entrySet()) {
+				String typeName = getObjectType(mapEntry.getValue().getMetaDataObject());
+				if ((typeName == null) || !typeStore.containsKey(typeName)) {
+					throw new MetadataStoreException(MessageFormat.format(
+							"The type \"{0}\" of the object you are trying to create does not exist in this metadata store.",
+							typeName));
+				}
+			}
+
+			// Move objects from staging area into metadata store
+			for (Map.Entry<String, StoredMetaDataObject> mapEntry : this.stagedObjects.entrySet()) {
+				StoredMetaDataObject object = mapEntry.getValue();
+				String typeName = getObjectType(mapEntry.getValue().getMetaDataObject());
+				logger.log(Level.INFO,
+						MessageFormat.format(
+								"Creating or updating object with id ''{0}'' and type ''{1}'' in metadata store.",
+								new Object[] { object.getMetaDataObject().getReference(), typeName }));
+				String objectId = object.getMetaDataObject().getReference().getId();
+				mergeReferenceMap(object); // Merge new object references with
+											// existing object references in
+											// metadata store
+				getObjects().put(objectId, object);
+			}
+
+			// Clear staging area
+			stagedObjects = new LinkedHashMap<String, StoredMetaDataObject>();
+		}
+	}
+
+	/**
+	 * Internal helper that creates a new annotation type in the internal type store if it does not yet exist.
+	 *
+	 * @param annotation Annotation whose type is to be registered
+	 */
+	private void ensureAnnotationTypeExists(Annotation annotation) {
+		String annotationType = annotation.getAnnotationType();
+		if (typeStore.get(annotationType) == null) {
+			if (annotation instanceof ProfilingAnnotation) {
+				typeStore.put(annotationType, "ProfilingAnnotation");
+			} else if (annotation instanceof ClassificationAnnotation) {
+				typeStore.put(annotationType, "ClassificationAnnotation");
+			} else if (annotation instanceof RelationshipAnnotation) {
+				typeStore.put(annotationType, "RelationshipAnnotation");
+			}
+		}
+	}
+}
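Editor's note: to illustrate the query syntax handled by search(), here is a hedged usage sketch. It assumes the default in-memory store and its sample data, and uses only API calls that appear elsewhere in this patch (newQueryBuilder, search, retrieve); the example class and printed output are illustrative.

import java.util.List;

import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
import org.apache.atlas.odf.core.metadata.DefaultMetadataStore;

public class DefaultMetadataStoreExample {
	public static void main(String[] args) {
		DefaultMetadataStore mds = new DefaultMetadataStore();
		mds.createSampleData(); // populate the store with the ODF sample objects
		// Builds a query equivalent to: from DataFile where name = 'BankClientsShort'
		String query = mds.newQueryBuilder()
				.objectType("DataFile")
				.simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort")
				.build();
		List<MetaDataObjectReference> refs = mds.search(query);
		for (MetaDataObjectReference ref : refs) {
			MetaDataObject mdo = mds.retrieve(ref);
			System.out.println("Found: " + mdo.getName());
		}
	}
}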
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java
new file mode 100755
index 0000000..4bccd6c
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/JDBCMetadataImporterImpl.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
+import org.apache.atlas.odf.api.metadata.importer.MetadataImportException;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+
+public class JDBCMetadataImporterImpl implements JDBCMetadataImporter {
+	Logger logger = Logger.getLogger(JDBCMetadataImporterImpl.class.getName());
+	private WritableMetadataStore mds;
+	WritableMetadataStoreUtils mdsUtils;
+
+	public JDBCMetadataImporterImpl() {
+		MetadataStore currentMds = new ODFFactory().create().getMetadataStore();
+		if (currentMds instanceof WritableMetadataStore) {
+			this.mds = (WritableMetadataStore) currentMds;
+		} else {
+			String errorText = "Cannot import data because metadata store ''{0}'' does not support the WritableMetadataStore interface.";
+			throw new RuntimeException(MessageFormat.format(errorText , currentMds.getClass()));
+		}
+	}
+
+	@Override
+	public JDBCMetadataImportResult importTables(JDBCConnection connection, String dbName, String schemaPattern, String tableNamePattern)  {
+		Connection conn = null;
+		try {
+			logger.log(Level.FINE, "Importing tables...");
+			conn = DriverManager.getConnection(connection.getJdbcConnectionString(), connection.getUser(), connection.getPassword());
+			DatabaseMetaData dmd = conn.getMetaData();
+			List<MetaDataObjectReference> matchingDatabases = mds.search(mds.newQueryBuilder().objectType("Database").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, dbName).build());
+			Database odfDatabase = null;
+			if (!matchingDatabases.isEmpty()) {
+				odfDatabase = (Database) mds.retrieve(matchingDatabases.get(0));
+				mds.updateObject(odfDatabase);
+			} else {
+				odfDatabase = new Database();
+				List<MetaDataObjectReference> conList = new ArrayList<MetaDataObjectReference>();
+				odfDatabase.setConnections(conList);
+				odfDatabase.setName(dbName);
+				odfDatabase.setDbType(dmd.getDatabaseProductName());
+				odfDatabase.setDescription("Database " + dbName + " imported by JDBC2AtlasImporter on " + new Date());
+				mds.createObject(odfDatabase);
+			}
+			Map<String, Table> tableMap = new HashMap<String, Table>();
+			Map<String, Schema> schemaMap = new HashMap<>();
+			List<MetaDataObjectReference> schemaList = new ArrayList<MetaDataObjectReference>();
+			Set<String> tableNames = new HashSet<>();
+			ResultSet columnRS = dmd.getColumns(null, schemaPattern, tableNamePattern, null);
+			while (columnRS.next()) {
+				String columnName = columnRS.getString("COLUMN_NAME");
+				String schemaName = columnRS.getString("TABLE_SCHEM");
+				String tableName = columnRS.getString("TABLE_NAME");
+				String dataType = columnRS.getString("TYPE_NAME");
+				
+				Schema schema = schemaMap.get(schemaName);
+				if (schema == null) {
+					for (Schema s : mds.getSchemas(odfDatabase)) {
+						if (schemaName.equals(s.getName())) {
+							schema = s;
+							mds.updateObject(schema);
+							break;
+						}
+					}
+					if (schema == null) {
+						schema = new Schema();
+						schema.setName(schemaName);
+						schemaList.add(mds.createObject(schema));
+					}
+					schemaMap.put(schemaName, schema);
+					mds.addSchemaReference(odfDatabase, schema.getReference());
+				}
+				
+				String key = schemaName + "." + tableName;
+				Table tableObject = tableMap.get(key);
+				if (tableObject == null) {
+					for (Table t : mds.getTables(schema)) {
+						if (tableName.equals(t.getName())) {
+							tableObject = t;
+							mds.updateObject(tableObject);
+							break;
+						}
+					}
+					if (tableObject == null) {
+						tableObject = new Table();
+						tableObject.setName(tableName);
+						MetaDataObjectReference ref = mds.createObject(tableObject);
+						tableObject.setReference(ref);
+					}
+					tableNames.add(tableName);
+					tableMap.put(key, tableObject);
+					mds.addTableReference(schema, tableObject.getReference());
+				}
+				Column column = null;
+				for (Column c : mds.getColumns(tableObject)) {
+					if (columnName.equals(c.getName())) {
+						column = c;
+						break;
+					}
+				}
+				if (column == null) {
+					// Add new column only if a column with the same name does not exist
+					column = WritableMetadataStoreUtils.createColumn(columnName, dataType, null);
+					mds.createObject(column);
+				}
+				mds.addColumnReference(tableObject, column.getReference());
+			}
+			columnRS.close();
+			logger.log(Level.INFO, "Found {0} tables in database ''{1}'': ''{2}''", new Object[]{tableMap.keySet().size(), dbName, tableNames });
+
+			JDBCConnection odfConnection = null;
+			for (MetaDataObject c : mds.getConnections(odfDatabase)) {
+				if ((c instanceof JDBCConnection) && connection.getJdbcConnectionString().equals(((JDBCConnection) c).getJdbcConnectionString())) {
+					odfConnection = (JDBCConnection) c;
+					mds.updateObject(odfConnection);
+					break;
+				}
+			}
+			if (odfConnection == null) {
+				odfConnection = new JDBCConnection();
+				odfConnection.setJdbcConnectionString(connection.getJdbcConnectionString());
+				odfConnection.setUser(connection.getUser());
+				odfConnection.setPassword(connection.getPassword());
+				odfConnection.setDescription("JDBC connection for database " + dbName);
+				mds.createObject(odfConnection);
+			}
+			mds.addConnectionReference(odfDatabase, odfConnection.getReference());
+
+			mds.commit();
+			return new JDBCMetadataImportResult(dbName, odfDatabase.getReference().getId(), new ArrayList<String>( tableMap.keySet() ));
+		} catch (SQLException exc) {
+			throw new MetadataImportException(exc);
+		} finally {
+			if (conn != null) {
+				try {
+					conn.close();
+				} catch (SQLException e) {
+					logger.log(Level.WARNING, "Error closing JDBC connection", e);
+				}
+			}
+		}
+
+	}
+}
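Editor's note: a hedged usage sketch of the importer follows. The Derby connection string, credentials, and database/schema names are placeholders, and the constructor requires the configured metadata store to implement WritableMetadataStore (as the default in-memory store does).

import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
import org.apache.atlas.odf.core.metadata.JDBCMetadataImporterImpl;

public class JdbcImportExample {
	public static void main(String[] args) {
		JDBCConnection connection = new JDBCConnection();
		connection.setJdbcConnectionString("jdbc:derby:memory:sampledb;create=true"); // placeholder
		connection.setUser("user");         // placeholder
		connection.setPassword("password"); // placeholder

		JDBCMetadataImporterImpl importer = new JDBCMetadataImporterImpl();
		// Import all tables of schema "APP" from database "SAMPLEDB".
		JDBCMetadataImportResult result = importer.importTables(connection, "SAMPLEDB", "APP", "%");
		System.out.println("Import finished: " + result);
	}
}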
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java
new file mode 100755
index 0000000..9169d8a
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/SampleDataHelper.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.text.MessageFormat;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.Utils;
+
+public class SampleDataHelper {
+	private static Logger logger = Logger.getLogger(SampleDataHelper.class.getName());
+	private static final String SAMPLE_DATA_FILE_LIST = "sample-data-toc.properties";
+	private static final String SAMPLE_DATA_FILE_FOLDER = "org/apache/atlas/odf/core/metadata/internal/sampledata/";
+
+	public static void copySampleFiles() {
+		Properties toc = new Properties();
+		ClassLoader cl = SampleDataHelper.class.getClassLoader();
+		try {
+			toc.load(cl.getResourceAsStream(SAMPLE_DATA_FILE_FOLDER + SAMPLE_DATA_FILE_LIST));
+
+			for (String contentFileName : toc.stringPropertyNames()) {
+				logger.log(Level.INFO, "Processing sample file: {0}", contentFileName);
+				String url = copySampleDataFileContents(cl.getResourceAsStream(SAMPLE_DATA_FILE_FOLDER + contentFileName), contentFileName);
+				logger.log(Level.INFO, "Sample data file ''{0}'' copied to {1}", new Object[] { contentFileName, url });
+			}
+		} catch(IOException e) {
+			logger.log(Level.FINE, "An unexpected exception ocurred while connecting to Atlas", e);
+			String messageText = MessageFormat.format("Content file list {0} could not be accessed.", SAMPLE_DATA_FILE_FOLDER + SAMPLE_DATA_FILE_LIST);
+			throw new RuntimeException(messageText, e);
+		}
+		logger.log(Level.INFO, "All sample data files created");
+	}
+
+	private static String copySampleDataFileContents(InputStream is, String contentFile) throws IOException {
+		String url = null;
+		String target = null;
+		String os = System.getProperty("os.name").toLowerCase();
+		if (os.startsWith("windows")) {
+			url = "file://localhost/c:/tmp/" + contentFile;
+			target = "c:/tmp/" + contentFile;
+		} else {
+			url = "file:///tmp/" + contentFile;
+			target = "/tmp/" + contentFile;
+		}
+		String content = Utils.getInputStreamAsString(is, "UTF-8");
+		FileOutputStream fos = new FileOutputStream(target);
+		fos.write(content.getBytes("UTF-8"));
+		fos.close();
+		return url;
+	}
+}
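Editor's note: usage is a single static call; as the code above shows, the files are written to /tmp (or c:/tmp on Windows) so that the file URLs of the sample metadata objects resolve.

// Copy the bundled sample files to the local temp directory.
SampleDataHelper.copySampleFiles();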
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java
new file mode 100755
index 0000000..8cc56d6
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStore.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+
+/**
+ * Interface to be implemented by metadata stores that support write access, i.e. the creation of new metadata objects,
+ * update of existing metadata objects, and creation of references between metadata objects. The new or updated objects
+ * and references remain in a staging area until they are committed. This is necessary in order to avoid inconsistent
+ * states during comprehensive write operations.  
+ * 
+ *
+ */
+public interface WritableMetadataStore extends MetadataStore {
+
+	/**
+	 * Add a new metadata object to the staging area of the metadata store.
+	 * If the object already has a reference, the reference id might be changed when committing the new object.
+	 *
+	 * @param metaDataObject Metadata object
+	 * @return Reference to the staged metadata object
+	 */
+	public MetaDataObjectReference createObject(MetaDataObject metaDataObject);
+
+	/**
+	 * Add an updated metadata object to the staging area of the metadata store. The object reference must point to an
+	 * existing object in the metadata store.  
+	 *
+	 * @param metaDataObject Metadata object
+	 */
+	public void updateObject(MetaDataObject metaDataObject);
+
+	/**
+	 * Apply all staged changes to the metadata store.
+	 *
+	 */
+	public void commit();
+
+	/**
+	 * Add a data file reference to an updated or new data file folder in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param folder Data file folder to add the reference to
+	 * @param reference Reference of the data file to be added to the folder 
+	 */
+	public void addDataFileReference(DataFileFolder folder, MetaDataObjectReference reference);
+
+	/**
+	 * Add a data file folder reference to an updated or new data file folder in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param folder Data file folder to add the reference to
+	 * @param reference Reference of the data file folder to be added to the folder 
+	 */
+	public void addDataFileFolderReference(DataFileFolder folder, MetaDataObjectReference reference);
+
+	/**
+	 * Add a schema reference to an updated or new database in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param database Database to add the reference to
+	 * @param reference Reference of the schema to be added to the database 
+	 */
+	public void addSchemaReference(Database database, MetaDataObjectReference reference);
+
+	/**
+	 * Add a table reference to an updated or new schema in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param schema Schema to add the reference to
+	 * @param reference Reference of the table to be added to the schema 
+	 */
+	public void addTableReference(Schema schema, MetaDataObjectReference reference);
+
+	/**
+	 * Add a column reference to an updated or new relational data set in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param relationalDataSet Relational data set to add the reference to
+	 * @param reference Reference of the column to be added to the relational data set 
+	 */
+	public void addColumnReference(RelationalDataSet relationalDataSet, MetaDataObjectReference reference);
+
+	/**
+	 * Add a connection reference to an updated or new data store in the staging area.
+	 * The new reference will be merged with existing references during the commit operation.
+	 *
+	 * @param dataStore Data store to add the reference to
+	 * @param reference Reference of the connection to be added to the data store 
+	 */
+	public void addConnectionReference(DataStore dataStore, MetaDataObjectReference reference);
+
+}
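Editor's note: a minimal sketch of the staging workflow this interface defines, assuming an implementation such as the in-memory DefaultMetadataStore: objects and references are staged first and only become visible once commit() is called. The names SAMPLEDB and APP are illustrative.

import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
import org.apache.atlas.odf.api.metadata.models.Database;
import org.apache.atlas.odf.api.metadata.models.Schema;
import org.apache.atlas.odf.core.metadata.WritableMetadataStore;

public class StagingExample {
	public static void stageDatabaseWithSchema(WritableMetadataStore mds) {
		Database db = new Database();
		db.setName("SAMPLEDB");
		mds.createObject(db); // staged, not yet visible to readers

		Schema schema = new Schema();
		schema.setName("APP");
		MetaDataObjectReference schemaRef = mds.createObject(schema);

		mds.addSchemaReference(db, schemaRef); // merged with existing references on commit
		mds.commit(); // apply all staged changes
	}
}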
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java
new file mode 100755
index 0000000..d5f8772
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreBase.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.InternalMetadataStoreBase;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.StoredMetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+
+/**
+ * Common base for writable metadata stores.
+ * Note that the methods implemented by InternalMetadataStoreBase are not necessarily used by all classes that extend WritableMetadataStoreBase.
+ * (If Java supported multiple inheritance, WritableMetadataStoreBase and InternalMetadataStoreBase would be independent classes.)
+ * 
+ * 
+ */
+public abstract class WritableMetadataStoreBase extends InternalMetadataStoreBase implements WritableMetadataStore {
+	private static Logger logger = Logger.getLogger(WritableMetadataStoreBase.class.getName());
+
+	abstract protected LinkedHashMap<String, StoredMetaDataObject> getStagedObjects();
+
+	private void addReference(MetaDataObject metaDataObject, String attributeName, MetaDataObjectReference reference) {
+		if (metaDataObject.getReference() == null) {
+			throw new MetadataStoreException("Cannot add a reference because metadata object reference is null.");
+		}
+		StoredMetaDataObject obj = this.getStagedObjects().get(metaDataObject.getReference().getId());
+		if (obj != null) {
+			if (obj.getReferenceMap().get(attributeName) == null) {
+				obj.getReferenceMap().put(attributeName, new ArrayList<MetaDataObjectReference>());
+			}
+			obj.getReferenceMap().get(attributeName).add(reference);
+		} else {
+			String errorMessage = MessageFormat.format("A staged object with id ''{0}'' does not exist. Create or update the object before adding a reference.", metaDataObject.getReference().getId());
+			throw new MetadataStoreException(errorMessage);
+		}
+	}
+
+	@Override
+	public void addDataFileReference(DataFileFolder folder, MetaDataObjectReference reference) {
+		addReference(folder, ODF_DATAFILES_REFERENCE, reference);
+	}
+
+	@Override
+	public void addDataFileFolderReference(DataFileFolder folder, MetaDataObjectReference reference) {
+		addReference(folder, ODF_DATAFILEFOLDERS_REFERENCE, reference);
+	}
+
+	@Override
+	public void addSchemaReference(Database database, MetaDataObjectReference reference) {
+		addReference(database, ODF_SCHEMAS_REFERENCE, reference);
+	}
+
+	@Override
+	public void addTableReference(Schema schema, MetaDataObjectReference reference) {
+		addReference(schema, ODF_TABLES_REFERENCE, reference);
+	}
+
+	@Override
+	public void addColumnReference(RelationalDataSet relationalDataSet, MetaDataObjectReference reference) {
+		addReference(relationalDataSet, ODF_COLUMNS_REFERENCE, reference);
+	}
+
+	@Override
+	public void addConnectionReference(DataStore dataStore, MetaDataObjectReference reference) {
+		addReference(dataStore, ODF_CONNECTIONS_REFERENCE, reference);
+	}
+
+	@Override
+	public MetaDataObjectReference createObject(MetaDataObject metaDataObject) {
+		if (metaDataObject.getReference() == null) {
+			metaDataObject.setReference(WritableMetadataStoreUtils.generateMdoRef(this));
+		}
+		this.getStagedObjects().put(metaDataObject.getReference().getId(), new StoredMetaDataObject(metaDataObject));
+		logger.log(Level.FINE, "Added new new object of type ''{0}'' with id ''{1}'' to staging area.",
+				new Object[] { metaDataObject.getClass().getSimpleName(), metaDataObject.getReference().getId() });
+		return metaDataObject.getReference();
+	}
+
+	@Override
+	public void updateObject(MetaDataObject metaDataObject) {
+		if (metaDataObject.getReference() == null) {
+			throw new MetadataStoreException("Reference attribute cannot be ''null'' when updating a metadata object.");
+		}
+		if (retrieve(metaDataObject.getReference()) == null) {
+			throw new MetadataStoreException(
+					MessageFormat.format("An object wih id ''{0}'' does not extist in this metadata store.",
+							metaDataObject.getReference().getId()));
+		}
+		this.getStagedObjects().put(metaDataObject.getReference().getId(), new StoredMetaDataObject(metaDataObject));
+		logger.log(Level.FINE, "Added updated object of type ''{0}'' with id ''{1}'' to staging area.",
+				new Object[] { metaDataObject.getClass().getSimpleName(), metaDataObject.getReference().getId() });
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java
new file mode 100755
index 0000000..808b4d2
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/metadata/WritableMetadataStoreUtils.java
@@ -0,0 +1,297 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.BusinessTerm;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.ConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.DataStore;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.Document;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+
+/**
+ * Utilities to be used for implementing the {@link WritableMetadataStore} interface, i.e. for
+ * adding support for an additional writable metadata store to ODF.
+ *
+ *
+ */
+public class WritableMetadataStoreUtils {
+
+	/**
+	 * Utility method for creating and populating a new {@link Column} object. A reference pointing to a
+	 * specific metadata store is only generated once the column is created in that store.
+	 *
+	 * @param name Name of the new column
+	 * @param dataType Data type of the new column
+	 * @param description Description of the new column
+	 * @return The resulting column object
+	 */
+	public static Column createColumn(String name, String dataType, String description) {
+		Column column = new Column();
+		column.setName(name);
+		column.setDescription(description);
+		column.setDataType(dataType);
+		return column;
+	}
+
+	public static String getFileUrl(String shortFileName) {
+		if (System.getProperty("os.name").toLowerCase().startsWith("windows")) {
+			return "file://localhost/c:/tmp/" + shortFileName;
+		} else {
+			return "file:///tmp/" + shortFileName;
+		}
+	}
+
+	/**
+	 * Utility method for generating a new metadata object reference that uses a random id and points
+	 * to a given metadata store.
+	 *
+	 * @param mds Metadata store to which the new reference should point
+	 * @return The resulting metadata object reference
+	 */
+	public static MetaDataObjectReference generateMdoRef(MetadataStore mds) {
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId(UUID.randomUUID().toString());
+		ref.setRepositoryId(mds.getRepositoryId());
+		ref.setUrl("");
+		return ref;
+	}
+
+	/**
+	 * Utility method that creates the ODF example objects used for the ODF integration tests
+	 * in a given metadata store and commits them.
+	 *
+	 * @param mds Writable metadata store in which the example objects are created
+	 */
+	public static void createSampleDataObjects(WritableMetadataStore mds) {
+		DataFile bankClients = new DataFile();
+		bankClients.setName("BankClientsShort");
+		bankClients.setDescription("A reduced sample data file containing bank clients.");
+		bankClients.setUrlString(getFileUrl("bank-clients-short.csv"));
+		mds.createObject(bankClients);
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("CLIENT_ID", "string", "A client ID (column 1)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("NAME", "string", "A client name (column 2)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("ADDRESS", "string", "A client's address (column 3)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("ZIP", "string", "Zip code (column 4)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("AGE", "double", "Age in years (column 5)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("GENDER", "string", "Person gender (column 6)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("MARITAL_STATUS", "string", "Marital status (column 7)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("PROFESSION", "string", "Profession (column 8)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("NBR_YEARS_CLI", "double", "The number of years how long the client has been with us (column 9)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("SAVINGS_ACCOUNT", "string", "Savings account number (column 10)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("ONLINE_ACCESS", "string", "A flag indicating if the client accesses her accounts online (column 11)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("JOINED_ACCOUNTS", "string", "A flag indicating if the client has joined accounts (column 12)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("BANKCARD", "string", "A flag indicating if the client has a bankcard (column 13)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("AVERAGE_BALANCE", "double", "The average balance over the last year (column 14)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("ACCOUNT_ID", "int", "Account Id / number (column 15)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("ACCOUNT_TYPE", "string", "Type of account (column 16)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("EMAIL", "string", "A flag indicating if the client has joined accounts (column 17)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("CCN", "string", "Credit card number (column 18)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("PHONE1", "string", "Primary hone number (column 19)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("PHONE2", "string", "Secondary phone number (column 20)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("CC", "string", "CC indicator (column 21)")));
+		mds.addColumnReference(bankClients, mds.createObject(createColumn("CONTACT", "string", "Contact in case of emergency (column 22)")));
+
+		DataFile simpleExampleTable = new DataFile();
+		simpleExampleTable.setName("SimpleExampleTable");
+		simpleExampleTable.setDescription("A very simple example document referring to a local file.");
+		simpleExampleTable.setUrlString(getFileUrl("simple-example-table.csv"));
+		mds.createObject(simpleExampleTable);
+		mds.addColumnReference(simpleExampleTable, mds.createObject(createColumn("ColumnName1", "string", null)));
+		mds.addColumnReference(simpleExampleTable, mds.createObject(createColumn("ColumnName2", "int", null)));
+
+		Document simpleExampleURLDocument = new Document();
+		simpleExampleURLDocument.setName("Simple URL example document");
+		simpleExampleURLDocument.setDescription("A very simple example document referring to a publicly available URL");
+		simpleExampleURLDocument.setUrlString("https://www.wikipedia.org");
+		simpleExampleURLDocument.setEncoding("ASCII");
+		mds.createObject(simpleExampleURLDocument);
+
+		Document simpleExampleDocument = new Document();
+		simpleExampleDocument.setName("Simple local example document");
+		simpleExampleDocument.setDescription("A very simple example document referring to a local file");
+		simpleExampleDocument.setUrlString(getFileUrl("simple-example-document.txt"));
+		simpleExampleDocument.setEncoding("ASCII");
+		mds.createObject(simpleExampleDocument);
+
+		BusinessTerm bankClientTerm1 = new BusinessTerm();
+		bankClientTerm1.setName("Address");
+		bankClientTerm1.setDescription("The mail address of a person or organization");
+		bankClientTerm1.setAbbreviations(Arrays.asList(new String[] { "Addr" }));
+		bankClientTerm1.setExample("257 Great Lister Street P O BOX 1107 Birmingham");
+		bankClientTerm1.setUsage("Outgoing mail (physical).");
+		mds.createObject(bankClientTerm1);
+
+		BusinessTerm bankClientTerm2a = new BusinessTerm();
+		bankClientTerm2a.setName("Marital Status");
+		bankClientTerm2a.setDescription("The marital status of a person (single, married, divorced, or other).");
+		bankClientTerm2a.setAbbreviations(Arrays.asList(new String[] { "MS","MAST" }));
+		bankClientTerm2a.setExample("single");
+		bankClientTerm2a.setUsage("Contracting");
+		mds.createObject(bankClientTerm2a);
+
+		BusinessTerm bankClientTerm2b = new BusinessTerm();
+		bankClientTerm2b.setReference(generateMdoRef(mds));
+		bankClientTerm2b.setName("Marital Status");
+		bankClientTerm2b.setDescription("2nd term representing the marital status of a person.");
+		bankClientTerm2b.setAbbreviations(Arrays.asList(new String[] { "MS","MAST" }));
+		bankClientTerm2b.setExample("married");
+		bankClientTerm2b.setUsage("Human Resources");
+		mds.createObject(bankClientTerm2b);
+
+		BusinessTerm bankClientTerm3 = new BusinessTerm();
+		bankClientTerm3.setName("AVG Balance");
+		bankClientTerm3.setDescription("The average balance of an account over an amount of time, typically a year. Unit: Dollars.");
+		bankClientTerm3.setAbbreviations(Arrays.asList(new String[] { "AB","AVGB","AVGBAL" }));
+		bankClientTerm3.setExample("1000");
+		bankClientTerm3.setUsage("Contracting");
+		bankClientTerm3.setOriginRef("test-pointer-to-igc");
+		bankClientTerm3.setReplicaRefs(Arrays.asList(new String[] { "first-replica-pointer", "second-replica-pointer" }));
+		mds.createObject(bankClientTerm3);
+
+		BusinessTerm bankClientTerm4 = new BusinessTerm();
+		bankClientTerm4.setName("LASTNAME");
+		bankClientTerm4.setDescription("Last name of a person");
+		bankClientTerm4.setAbbreviations(Arrays.asList(new String[] { "LASTNME" }));
+		bankClientTerm4.setExample("1000");
+		bankClientTerm4.setUsage("Contracting");
+		mds.createObject(bankClientTerm4);
+
+		BusinessTerm bankClientTerm5a = new BusinessTerm();
+		bankClientTerm5a.setReference(generateMdoRef(mds));
+		bankClientTerm5a.setName("Credit Card Number");
+		bankClientTerm5a.setDescription("Credit card number of a customer");
+		bankClientTerm5a.setAbbreviations(Arrays.asList(new String[] { "CreNum", "CCN" }));
+		bankClientTerm5a.setExample("1234567");
+		bankClientTerm5a.setUsage("Contracting");
+		mds.createObject(bankClientTerm5a);
+
+		BusinessTerm bankClientTerm5b = new BusinessTerm();
+		bankClientTerm5b.setReference(generateMdoRef(mds));
+		bankClientTerm5b.setName("Credit Card Number");
+		bankClientTerm5b.setDescription("Credit card number of an employee");
+		bankClientTerm5b.setAbbreviations(Arrays.asList(new String[] {}));      // this one has no abbreviations
+		bankClientTerm5b.setExample("1234567");
+		bankClientTerm5b.setUsage("Human Resources");
+		mds.createObject(bankClientTerm5b);
+
+		BusinessTerm bankClientTermDataSetLevel = new BusinessTerm();
+		bankClientTermDataSetLevel.setName("Bank Clients");
+		bankClientTermDataSetLevel.setDescription("The only purpose of this term is to match the name of the data set BankClientsShort");
+		bankClientTermDataSetLevel.setAbbreviations(Arrays.asList(new String[] { "BC" }));
+		bankClientTermDataSetLevel.setExample("<none>");
+		bankClientTermDataSetLevel.setUsage("Integration testing of TermMatcher discovery service. Yields confidence value of 56.");
+		mds.createObject(bankClientTermDataSetLevel);
+
+		mds.commit();
+	}
+
+	/**
+	 * Utility method that returns the list of ODF base types that need to be supported by a metadata store in order to be used with ODF.
+	 *
+	 * @return List of the ODF base types
+	 */
+	public static final List<Class<?>> getBaseTypes() {
+		List<Class<?>> typeList = new ArrayList<Class<?>>();
+		typeList.add(MetaDataObject.class);
+		typeList.add(DataStore.class);
+		typeList.add(Database.class);
+		typeList.add(Connection.class);
+		typeList.add(JDBCConnection.class);
+		typeList.add(DataSet.class);
+		typeList.add(UnknownDataSet.class);
+		typeList.add(RelationalDataSet.class);
+		typeList.add(Column.class);
+		typeList.add(Table.class);
+		typeList.add(Schema.class);
+		typeList.add(DataFileFolder.class);
+		typeList.add(DataFile.class);
+		typeList.add(Document.class);
+		typeList.add(Annotation.class);
+		typeList.add(ProfilingAnnotation.class);
+		typeList.add(ClassificationAnnotation.class);
+		typeList.add(RelationshipAnnotation.class);
+		typeList.add(BusinessTerm.class);
+		return typeList;
+	}
+
+	/**
+	 * Utility method that returns a connection info object for a given information asset.
+	 *
+	 * @param mds Metadata store containing the information asset
+	 * @param informationAsset Information asset for which to retrieve connection info
+	 * @return Connection info object, or null if the asset type is not supported
+	 */
+	public static ConnectionInfo getConnectionInfo(MetadataStore mds, MetaDataObject informationAsset) {
+		if (informationAsset instanceof Table) {
+			Schema schema = getParentOfType(mds, informationAsset, Schema.class);
+			Database database = getParentOfType(mds, schema, Database.class);
+			JDBCConnectionInfo jdbcConnectionInfo = new JDBCConnectionInfo();
+			jdbcConnectionInfo.setSchemaName(schema.getName());
+			jdbcConnectionInfo.setTableName(informationAsset.getName());
+			jdbcConnectionInfo.setConnections(mds.getConnections(database));
+			jdbcConnectionInfo.setAssetReference(informationAsset.getReference());
+			return jdbcConnectionInfo;
+		}
+		return null;
+	}
+
+	/**
+	 * Utility to return the parent of a metadata object cast to a given type.
+	 * An exception is thrown if the types don't match.
+	 *
+	 * @param mds Metadata store
+	 * @param metaDataObject Metadata object
+	 * @param type Class to which the parent should be cast
+	 * @return Parent object of the given metadata object
+	 */
+	public static <T> T getParentOfType(MetadataStore mds, MetaDataObject metaDataObject, Class<T> type) {
+		MetaDataObject parent = mds.getParent(metaDataObject);
+		if (parent == null) {
+			String errorMessage = MessageFormat.format("Cannot determine the parent of object ''{0}'' because the parent object is null.", metaDataObject.getReference().getId());
+			throw new MetadataStoreException(errorMessage);
+		}
+		if (!type.isInstance(parent)) {
+			String errorMessage = MessageFormat.format("Parent of object ''{0}'' is expected to be of type ''{1}'' but is ''{2}''.",
+					new Object[] { metaDataObject.getReference().getId(), type.getSimpleName(), parent.getClass().getName() });
+			throw new MetadataStoreException(errorMessage);
+		}
+		return type.cast(parent);
+	}
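+
+	// Illustrative sketch (not part of the original patch): walking up the containment
+	// hierarchy with getParentOfType(), mirroring what getConnectionInfo() does above;
+	// "table" is an assumed variable holding a Table fetched from the store:
+	//
+	//   Schema schema = getParentOfType(mds, table, Schema.class);
+	//   Database database = getParentOfType(mds, schema, Database.class);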
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java
new file mode 100755
index 0000000..f2f95ff
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/DefaultNotificationManager.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.notification;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Default notification manager that registers no listeners. Provide a custom
+ * implementation in the odf-implementations.properties file(s) to add listeners.
+ */
+public class DefaultNotificationManager implements NotificationManager {
+
+	@Override
+	public List<NotificationListener> getListeners() {
+		// no listeners by default
+		return new ArrayList<>();
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java
new file mode 100755
index 0000000..fb6c37a
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationListener.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.notification;
+
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+
+public interface NotificationListener {
+
+	/**
+	 * A human-readable name for this listener, used for logging and management.
+	 */
+	String getName();
+
+	/**
+	 * The Kafka topic to listen on.
+	 */
+	String getTopicName();
+
+	/**
+	 * Called whenever an event arrives on the topic. Typically, an implementation
+	 * initiates an analysis request on the passed ODF instance.
+	 */
+	void onEvent(String event, OpenDiscoveryFramework odf);
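+
+	// Illustrative sketch (not part of the original patch) of a minimal listener that
+	// starts an analysis for every incoming event. The topic name, the helper
+	// buildRequestFromEvent(), and the accessor/method names on the odf instance are
+	// assumptions for illustration only:
+	//
+	//   public class SampleListener implements NotificationListener {
+	//       public String getName() { return "sample-listener"; }
+	//       public String getTopicName() { return "sample-event-topic"; }
+	//       public void onEvent(String event, OpenDiscoveryFramework odf) {
+	//           AnalysisRequest request = buildRequestFromEvent(event); // hypothetical helper
+	//           odf.getAnalysisManager().runAnalysis(request);          // assumed accessors
+	//       }
+	//   }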
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java
new file mode 100755
index 0000000..ce4d8ff
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/notification/NotificationManager.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.notification;
+
+import java.util.List;
+
+/**
+ * Supplies the notification listeners used by ODF.
+ * Provide implementations in the odf-implementations.properties file(s).
+ */
+public interface NotificationManager {
+
+	List<NotificationListener> getListeners();
+
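+	// To activate a custom manager, map the interface to the implementation class in
+	// the implementation properties, mirroring the entry in
+	// odf-default-implementation.properties (com.example.MyNotificationManager is a
+	// hypothetical class):
+	//
+	//   org.apache.atlas.odf.core.notification.NotificationManager=com.example.MyNotificationManager
+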
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java
new file mode 100755
index 0000000..6b33cdd
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/settings/SettingsManagerImpl.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.settings;
+
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.configuration.ConfigManager;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.api.settings.KafkaConsumerConfig;
+import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
+import org.apache.atlas.odf.api.settings.MessagingConfiguration;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ * External Java API for reading and updating ODF settings.
+ */
+public class SettingsManagerImpl implements SettingsManager {
+	public static final String HIDDEN_PASSWORD_IDENTIFIER = "***hidden***";
+	private ConfigManager configManager;
+
+	public SettingsManagerImpl() {
+		ODFInternalFactory f = new ODFInternalFactory();
+		configManager = f.create(ConfigManager.class);
+	}
+
+	/**
+	 * Retrieve Kafka consumer properties
+	 * @return Current Kafka consumer properties
+	 */
+	public Properties getKafkaConsumerProperties() {
+		Properties props = new Properties();
+		MessagingConfiguration messagingConfig = getODFSettings().getMessagingConfiguration();
+		if (!(messagingConfig instanceof KafkaMessagingConfiguration)) {
+			return props;
+		}
+		KafkaConsumerConfig config = ((KafkaMessagingConfiguration) messagingConfig).getKafkaConsumerConfig();
+		try {
+			JSONObject configJSON = JSONUtils.toJSONObject(config);
+			for (Object key : configJSON.keySet()) {
+				props.setProperty((String) key, String.valueOf(configJSON.get(key)));
+			}
+		} catch (JSONException e) {
+			throw new RuntimeException("The kafka consumer config could not be parsed!", e);
+		}
+		return props;
+	}
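+
+	// Illustrative sketch (not part of the original patch): the returned properties can
+	// be combined with connection and deserializer settings before creating a consumer.
+	// The broker address and deserializer entries below are assumptions, not values
+	// managed by this class:
+	//
+	//   Properties props = settingsManager.getKafkaConsumerProperties();
+	//   props.setProperty("bootstrap.servers", "localhost:9092");
+	//   props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+	//   props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+	//   KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);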
+
+	/**
+	 * Retrieve Kafka producer properties
+	 * @return Current Kafka producer properties
+	 */
+	public Properties getKafkaProducerProperties() {
+		// Currently, no producer properties are editable, so none are stored in the
+		// configuration file; only the fixed serializers are returned.
+		Properties props = new Properties();
+		props.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+		props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+		return props;
+	}
+
+	/**
+	 * Retrieve overall ODF settings including plain passwords
+	 * @return Current ODF settings
+	 */
+	public ODFSettings getODFSettings() {
+		return configManager.getConfigContainer().getOdf();
+	}
+
+	/**
+	 * Retrieve overall ODF settings with hidden passwords
+	 * @return Current ODF settings
+	 */
+	public ODFSettings getODFSettingsHidePasswords() {
+		return this.configManager.getConfigContainerHidePasswords().getOdf();
+	}
+
+	/**
+	 * Update ODF settings
+	 *
+	 * Passwords provided as plain text will be encrypted. If HIDDEN_PASSWORD_IDENTIFIER
+	 * is provided instead of a password, the stored password will remain unchanged.
+	 *
+	 * @param update Updated ODF settings
+	 */
+	public void updateODFSettings(ODFSettings update) throws ValidationException {
+		ConfigContainer cont = new ConfigContainer();
+		cont.setOdf(update);
+		this.configManager.updateConfigContainer(cont);
+	}
+
+	/**
+	 * Reset ODF settings to the defaults
+	 */
+	public void resetODFSettings() {
+		new ODFInternalFactory().create(ConfigManager.class).resetConfigContainer();
+	}
+
+	/**
+	 * Retrieve user-defined ODF properties
+	 * @return Map of user-defined ODF properties
+	 */
+	public Map<String, Object> getUserDefinedConfig() {
+		return getODFSettings().getUserDefined();
+	}
+
+	/**
+	 * Update user-defined ODF properties
+	 * @param update Map of user-defined ODF properties
+	 * @throws ValidationException If the updated settings do not pass validation
+	 */
+	public void updateUserDefined(Map<String, Object> update) throws ValidationException {
+		ODFSettings odfConfig = new ODFSettings();
+		odfConfig.setUserDefined(update);
+		updateODFSettings(odfConfig);
+	}
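+
+	// Illustrative sketch (not part of the original patch): storing a user-defined
+	// property through this API. The factory accessors and the property key/value are
+	// assumptions for illustration:
+	//
+	//   SettingsManager settings = new ODFFactory().create().getSettingsManager();
+	//   Map<String, Object> userDefined = settings.getUserDefinedConfig();
+	//   userDefined.put("myService.threshold", 42);
+	//   settings.updateUserDefined(userDefined);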
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/DefaultConfigurationStorage.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/DefaultConfigurationStorage.java
new file mode 100644
index 0000000..8428a3f
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/DefaultConfigurationStorage.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+
+/**
+ * Default configuration storage that keeps no state: the configuration is always taken
+ * from the provided defaults and any changes are discarded.
+ */
+public class DefaultConfigurationStorage implements ODFConfigurationStorage {
+
+	@Override
+	public void storeConfig(ConfigContainer container) {
+		// no-op: this default implementation does not persist the configuration
+	}
+
+	@Override
+	public ConfigContainer getConfig(ConfigContainer defaultConfiguration) {
+		// nothing is persisted, so always return the provided defaults
+		return defaultConfiguration;
+	}
+
+	@Override
+	public void onConfigChange(ConfigContainer container) {
+		// no-op: there is no persisted state to refresh
+	}
+
+	@Override
+	public void addPendingConfigChange(String changeId) {
+		// no-op: pending changes are not tracked
+	}
+
+	@Override
+	public void removePendingConfigChange(String changeId) {
+		// no-op: pending changes are not tracked
+	}
+
+	@Override
+	public boolean isConfigChangePending(String changeId) {
+		// pending changes are never tracked, so none can be pending
+		return false;
+	}
+
+}
diff --git a/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java
new file mode 100755
index 0000000..5bfae91
--- /dev/null
+++ b/odf/odf-core/src/main/java/org/apache/atlas/odf/core/store/ODFConfigurationStorage.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+
+public interface ODFConfigurationStorage {
+
+	void storeConfig(ConfigContainer container);
+
+	ConfigContainer getConfig(ConfigContainer defaultConfiguration);
+
+	void onConfigChange(ConfigContainer container);
+
+	void addPendingConfigChange(String changeId);
+
+	void removePendingConfigChange(String changeId);
+
+	boolean isConfigChangePending(String changeId);
+}
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties
new file mode 100755
index 0000000..7cb9477
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-default-implementation.properties
@@ -0,0 +1,32 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# default implementations
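+#
+# Each entry maps an ODF interface to the implementation class that ODF instantiates
+# for it. Custom implementations can presumably be provided by overriding an entry in
+# an odf-implementations.properties file on the classpath (see the NotificationManager
+# javadoc); the class names on the right-hand side must be available at runtime.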
+
+org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore=org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore
+org.apache.atlas.odf.core.controlcenter.ThreadManager=org.apache.atlas.odf.core.controlcenter.DefaultThreadManager
+org.apache.atlas.odf.api.metadata.MetadataStore=org.apache.atlas.odf.core.metadata.DefaultMetadataStore
+org.apache.atlas.odf.api.annotation.AnnotationStore=org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore
+org.apache.atlas.odf.core.controlcenter.DiscoveryServiceMessageStore=org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore
+org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter=org.apache.atlas.odf.core.metadata.JDBCMetadataImporterImpl
+org.apache.atlas.odf.core.connectivity.DataSetRetriever=org.apache.atlas.odf.core.connectivity.DataSetRetrieverImpl
+org.apache.atlas.odf.api.spark.SparkServiceExecutor=org.apache.atlas.odf.core.spark.SparkServiceExecutorImpl
+org.apache.atlas.odf.core.Environment=org.apache.atlas.odf.core.StandaloneEnvironment
+org.apache.atlas.odf.api.analysis.AnalysisManager=org.apache.atlas.odf.core.analysis.AnalysisManagerImpl
+org.apache.atlas.odf.api.engine.EngineManager=org.apache.atlas.odf.core.engine.EngineManagerImpl
+org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager=org.apache.atlas.odf.core.discoveryservice.DiscoveryServiceManagerImpl
+org.apache.atlas.odf.api.settings.SettingsManager=org.apache.atlas.odf.core.settings.SettingsManagerImpl
+org.apache.atlas.odf.core.messaging.MessageEncryption=org.apache.atlas.odf.core.messaging.DefaultMessageEncryption
+org.apache.atlas.odf.core.controlcenter.TransactionContextExecutor=org.apache.atlas.odf.core.controlcenter.DefaultTransactionContextExecutor
+org.apache.atlas.odf.core.notification.NotificationManager=org.apache.atlas.odf.core.notification.DefaultNotificationManager
+org.apache.atlas.odf.core.store.ODFConfigurationStorage=org.apache.atlas.odf.core.store.DefaultConfigurationStorage
\ No newline at end of file
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json
new file mode 100755
index 0000000..5bd1d0d
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/internal/odf-initial-configuration.json
@@ -0,0 +1,28 @@
+{
+	"odf" : {
+		"instanceId" : "odf-default-id-CHANGEME",
+		"odfUrl" : "https://localhost:58081/odf-web-1.2.0-SNAPSHOT",
+		"odfUser" : "sdp",
+		"odfPassword" : "ZzTeX3hKtVORgks+2TaLPWxerucPBoxK",
+		"consumeMessageHubEvents" : false,
+		"discoveryServiceWatcherWaitMs": 2000,
+		"reuseRequests": true,
+		"runAnalysisOnImport": false,
+		"runNewServicesOnRegistration": false,
+		"enableAnnotationPropagation": true,
+		"messagingConfiguration": {
+			"type": "org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration",
+			"analysisRequestRetentionMs": 86400000,
+			"kafkaBrokerTopicReplication": 1,
+			"queueConsumerWaitMs": 5000,
+			"kafkaConsumerConfig": {
+				"offsetsStorage": "kafka",
+				"zookeeperSessionTimeoutMs": 400,
+				"zookeeperConnectionTimeoutMs": 6000
+			}
+		},
+		"userDefined": {
+		}
+	},
+	"registeredServices" : []
+}
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv
new file mode 100755
index 0000000..5efd809
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/bank-clients-short.csv
@@ -0,0 +1,500 @@
+"CLIENT_ID","NAME","ADDRESS","ZIP","AGE","GENDER","MARITAL_STATUS","PROFESSION","NBR_YEARS_CLI","SAVINGS_ACCOUNT","ONLINE_ACCESS","JOINED_ACCOUNTS","BANKCARD","AVERAGE_BALANCE","ACCOUNT_ID","ACCOUNT_TYPE","EMAIL","CCN","PHONE1","PHONE2","CC","CONTACT"
+"1578","Tyler O Abatemarco","WittonBirmingham","77019-6813",85.0,"M","married","pensioner",7.0,"YES","NO","YES","NO",4987.44,101578,"SAVINGS","tyler_abatemarco@ibm.com",NULL,"605-555-7281","406-555-5219","LR","207-555-2684"
+"1578","Tyler O Abatemarco","WittonBirmingham","77019-6813",85.0,"M","married","pensioner",7.0,"YES","NO","YES","NO",13484.56,201578,"CHECKING","tyler_abatemarco@ibm.com",NULL,"605-555-7281","406-555-5219","LR","207-555-2684"
+"1579","Debra M Pedrick","Kings HeathBirmingham","80110-4998",78.0,"M","widowed","pensioner",10.0,"NO","NO","YES","YES",2070.00,101579,"SAVINGS","debra_pedrick@icloud.com","4146 6643 9004 5458","385-555-9954","512-555-6256","IL",NULL
+"1580","Cassandra R Ker","Kings HeathBirmingham","33781-2153",38.0,"F","married","employee",23.0,"YES","NO","YES","NO",10927.00,101580,"SAVINGS","cassandra.ker@web.de","4146 6643 9004 5458","904-555-4068",NULL,"ML","cassandra_ker@t-online.de"
+"1581","Johnson Y Foltz","351-353 Lea Bridge RoadLeyton London","34787",75.0,"F","widowed","pensioner",16.0,"YES","NO","NO","YES",13961.16,101581,"SAVINGS","foltz@de.ibm.com","213130951856200","334-555-3249",NULL,"AZ","503-555-9423"
+"1581","Johnson Y Foltz","351-353 Lea Bridge RoadLeyton London","34787",75.0,"F","widowed","pensioner",16.0,"YES","NO","NO","YES",37746.84,201581,"CHECKING","foltz@de.ibm.com","213130951856200","334-555-3249",NULL,"AZ","503-555-9423"
+"1582","Mary H Jacques","121 W Canal Stre Unit 53Leeds","00000-0000",62.0,"F","married","inactive",26.0,"NO","NO","YES","NO",46700.00,101582,"SAVINGS","mary_j@ccdef.net","3400-000000-00009","651-555-1612","717-555-3906","VA","307-555-1593"
+"1583","Pual G Fowler","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","19175-5490",59.0,"F","married","inactive",7.0,"NO","NO","YES","NO",58749.00,101583,"SAVINGS","pual_fowler@aol.com","3400-000000-00009","404-555-4055",NULL,"GU","fowler@gmx.net"
+"1584","Thoang D Meyers","Woodilee RoadKirkintilloch Glasgo","78723-6199",86.0,"F","married","inactive",18.0,"YES","NO","NO","NO",1712.61,101584,"SAVINGS","thoang_m@gmx.net","3528-3095-1856-2063","717-555-9363","501-555-9558","FJ","303-555-5512"
+"1584","Thoang D Meyers","Woodilee RoadKirkintilloch Glasgo","78723-6199",86.0,"F","married","inactive",18.0,"YES","NO","NO","NO",4630.39,201584,"CHECKING","thoang_m@gmx.net","3528-3095-1856-2063","717-555-9363","501-555-9558","FJ","303-555-5512"
+"1585","Janet M Alcazar","CLITTAFORD RD SOUTHWAYPLYMOUTH","34741-5027",24.0,"M","single","worker",0.0,"NO","YES","NO","YES",824.00,101585,"SAVINGS","janet_alcazar@gmx.net","3400-000000-00009","785-555-2001","401-555-214","SA","503-555-3229"
+"1586","Richard A Pringle","257 Great Lister Street P O BOX 1107 Birmingham","32114-3851",57.0,"F","married","employee",37.0,"YES","NO","YES","NO",29236.00,101586,"SAVINGS","richardpringle@msl.org","5383908528354962","850-555-8595","505-555-156","CA","360-555-4969"
+"1587","Christina P Gee","EASTLEIGHHAMPSHIRE S050 4EX","86047-2538",66.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",4424.76,101587,"SAVINGS","gee@blue.com","5423111111111111","850-555-3929","317-555-6474","DJ","501-555-9658"
+"1587","Christina P Gee","EASTLEIGHHAMPSHIRE S050 4EX","86047-2538",66.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",11963.24,201587,"CHECKING","gee@blue.com","5423111111111111","850-555-3929","317-555-6474","DJ","501-555-9658"
+"1588","Maurits Q Schuller","2560 MCMULLEN BOOTH RDCLEARWATER","33761-4100",67.0,"F","single","pensioner",25.0,"YES","NO","NO","YES",22249.00,101588,"SAVINGS","mschuller@t-online.de","36111111111111","517-555-8548","202-555-9609","NP","785-555-219"
+"1589","Lillian R Isaac","FleetesexHampshire","33309-3421",72.0,"M","single","pensioner",26.0,"NO","NO","NO","YES",32952.00,101589,"SAVINGS","isaac@icloud.com","6520224090045455","517-555-8413",NULL,"PH","517-555-6352"
+"1590","Lucy V Adler","Blucher StreetBirmingham","27406-6355",62.0,"F","married","inactive",15.0,"NO","NO","YES","NO",3703.05,101590,"SAVINGS","adler@t-online.de","378282246310005","225-555-32",NULL,"HM","501-555-1240"
+"1590","Lucy V Adler","Blucher StreetBirmingham","27406-6355",62.0,"F","married","inactive",15.0,"NO","NO","YES","NO",10011.94,201590,"CHECKING","adler@t-online.de","378282246310005","225-555-32",NULL,"HM","501-555-1240"
+"1591","Cory J Gardner","5 Oxford RoadNewbury","34742-0460",88.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",12870.00,101591,"CHECKING","cory_g@t-online.de",NULL,"808-555-6833",NULL,"NI","860-555-5925"
+"1592","Dixie C Weitzel","Rockingham RoadLeicester","77318-6919",43.0,"M","married","farmer",22.0,"YES","NO","YES","NO",16174.00,101592,"CHECKING","dixie.weitzel@t-online.de","180030951856201","919-555-1628",NULL,"AL",NULL
+"1593","Cleo D Lamkin","58/59 Lower High StreetWest Midlands","86403-6886",58.0,"M","married","worker",31.0,"YES","NO","YES","NO",19956.78,101593,"CHECKING","lamkin@de.ibm.com","5169 7990 9185 4334","904-555-9488",NULL,"SJ","804-555-4083"
+"1593","Cleo D Lamkin","58/59 Lower High StreetWest Midlands","86403-6886",58.0,"M","married","worker",31.0,"YES","NO","YES","NO",53957.22,201593,"SAVINGS","lamkin@de.ibm.com","5169 7990 9185 4334","904-555-9488",NULL,"SJ","804-555-4083"
+"1594","Harm Z Rossman","Tame RoadBirmingham","",61.0,"F","married","inactive",18.0,"NO","NO","NO","NO",4862.00,101594,"CHECKING","harm_rossman@icloud.com","6220264390045758","417-555-4830",NULL,"DO","360-555-9576"
+"1595","Paulline X Daby","HockleyBirmingham","19477",64.0,"F","single","inactive",23.0,"NO","YES","YES","YES",3580.00,101595,"CHECKING","daby@cicn.gov","6011567891012132","775-555-1070",NULL,"AO","802-555-6080"
+"1596","Aaron L Ayala","Elmwood AvenueFeltham","77429-1770",22.0,"M","single","inactive",8.0,"NO","NO","NO","YES",1273.86,101596,"CHECKING","aaron_ayala@yahoo.com","5462522444922689","601-555-5744",NULL,"KP","aaron_ayala@ibm.com"
+"1596","Aaron L Ayala","Elmwood AvenueFeltham","77429-1770",22.0,"M","single","inactive",8.0,"NO","NO","NO","YES",3444.14,201596,"SAVINGS","aaron_ayala@yahoo.com","5462522444922689","601-555-5744",NULL,"KP","aaron_ayala@ibm.com"
+"1597","Roselie S Worley","69 MOLAND STRRETBIRMINGHAM","28204-2120",50.0,"M","married","executives,self-employed",17.0,"NO","NO","NO","YES",13245.00,101597,"CHECKING","roselie_w@ccdef.net","30411111111111","402-555-4602","701-555-4795","SO","360-555-686"
+"1598","Ajaymu R Parghi","AbercraveSwansea","80901",71.0,"F","married","inactive",20.0,"NO","NO","YES","NO",3589.00,101598,"CHECKING","parghi@gmx.net","4024 0071 2159 5481","207-555-5103",NULL,"BN","404-555-4633"
+"1599","Keith Z Flynn","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","85602-7058",45.0,"M","cohabitant","craftsmen, storekeepers",8.0,"NO","NO","YES","NO",1738.53,101599,"CHECKING","kflynn@web.de","5169799091854334","573-555-446","860-555-9280","YE","401-555-1095"
+"1599","Keith Z Flynn","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","85602-7058",45.0,"M","cohabitant","craftsmen, storekeepers",8.0,"NO","NO","YES","NO",4700.47,201599,"SAVINGS","kflynn@web.de","5169799091854334","573-555-446","860-555-9280","YE","401-555-1095"
+"1600","Lanh I Redding","Thurmaston LaneLeicester","30340-1452",24.0,"M","single","inactive",6.0,"NO","YES","NO","YES",24808.00,101600,"CHECKING","lanh.redding@ccdef.net","30111111161229","334-555-9104","860-555-501","TP","lredding@icloud.com"
+"1601","Jeri H Redlinger","173 Friar StreetReading","29208",73.0,"M","married","pensioner",32.0,"YES","NO","YES","NO",24112.00,101601,"CHECKING","jeri_redlinger@aol.com","30210111161229",NULL,"808-555-3531","MQ",NULL
+"1602","Miyim M Arcangel","173 Friar Street 173 Friar Street Reading","99523-2764",59.0,"F","married","inactive",30.0,"YES","NO","YES","NO",15677.55,101602,"CHECKING","miyim.arcangel@web.de","4024 0071 2159 5481","916-555-1548","303-555-3886","CH",NULL
+"1602","Miyim M Arcangel","173 Friar Street 173 Friar Street Reading","99523-2764",59.0,"F","married","inactive",30.0,"YES","NO","YES","NO",42387.45,201602,"SAVINGS","miyim.arcangel@web.de","4024 0071 2159 5481","916-555-1548","303-555-3886","CH",NULL
+"1603","Ofelia G Miyauchi","Chandler`s FordEastleigh","78756-3216",50.0,"M","single","executives,self-employed",21.0,"NO","YES","NO","YES",4648.00,101603,"CHECKING","ofelia_m@msl.org","5285696282092972","904-555-7284",NULL,"IT","617-555-3241"
+"1604","Shelley Z Lawrence","DroitwichWorcester","78028-2709",41.0,"F","married","employee",26.0,"NO","NO","YES","NO",8615.00,101604,"CHECKING","shelley_lawrence@de.ibm.com","3528-3095-1856-2063","919-555-6224","808-555-7506","BY","615-555-5739"
+"1605","Robt R Ewing","STAFFA ROADLONDON","33932",72.0,"M","single","worker",27.0,"NO","NO","NO","NO",10021.59,101605,"SAVINGS","ewing@de.ibm.com","5383908528354962","360-555-6105","401-555-5080","QA","302-555-3375"
+"1605","Robt R Ewing","STAFFA ROADLONDON","33932",72.0,"M","single","worker",27.0,"NO","NO","NO","NO",27095.41,201605,"CHECKING","ewing@de.ibm.com","5383908528354962","360-555-6105","401-555-5080","QA","302-555-3375"
+"1606","Margreta Q Major","TamworthStaffs","33762-4933",62.0,"F","widowed","pensioner",1.0,"NO","NO","NO","YES",28450.00,101606,"SAVINGS","major@icloud.com","30310111161029","401-555-3077","225-555-2687","PH","major@t-online.de"
+"1607","Darryl E Read","NE 22 7 AANORTHUMBERLAND","0",71.0,"F","married","pensioner",20.0,"YES","NO","YES","YES",52266.00,101607,"SAVINGS","darryl_read@cicn.gov","5383908528354962","919-555-5740",NULL,"CZ","804-555-5268"
+"1608","Charles T Archer","Bellbrook ParkUckfield","85339-1777",76.0,"M","married","inactive",10.0,"NO","NO","YES","NO",29859.84,101608,"SAVINGS","charles_a@ibm.com","4024 0071 2159 5481","401-555-6122",NULL,"AQ","717-555-5402"
+"1608","Charles T Archer","Bellbrook ParkUckfield","85339-1777",76.0,"M","married","inactive",10.0,"NO","NO","YES","NO",80732.16,201608,"CHECKING","charles_a@ibm.com","4024 0071 2159 5481","401-555-6122",NULL,"AQ","717-555-5402"
+"1609","Ramona I Holden","IDOTTSVAGEN 7SWEDEN","71134",60.0,"M","married","pensioner",25.0,"NO","NO","NO","YES",7228.00,101609,"CHECKING","ramonaholden@blue.com","4146-6643-9004-5458","614-555-3324","502-555-7822","UG","417-555-688"
+"1610","Omid E Kerns","--------------------","0",20.0,"F","single","inactive",3.0,"NO","NO","NO","YES",1390.00,101610,"CHECKING","kerns@de.ibm.com","3400 000000 00009","518-555-3168","307-555-9628","FK","916-555-9610"
+"1611","Tom M Augustine","Barkby RoadLeicester","31008",44.0,"M","single","craftsmen, storekeepers",14.0,"NO","NO","YES","YES",2654.64,101611,"CHECKING","tom_augustine@yahoo.com","4024-0071-2159-5481","406-555-584",NULL,"BM","tom.augustine@gmx.net"
+"1611","Tom M Augustine","Barkby RoadLeicester","31008",44.0,"M","single","craftsmen, storekeepers",14.0,"NO","NO","YES","YES",7177.36,201611,"SAVINGS","tom_augustine@yahoo.com","4024-0071-2159-5481","406-555-584",NULL,"BM","tom.augustine@gmx.net"
+"1612","Jas S Antunes","Green LaneHeywood","78956-1353",73.0,"M","married","inactive",27.0,"NO","NO","YES","NO",11994.00,101612,"CHECKING","jas.antunes@web.de","30310111161029","804-555-2167","406-555-1196","KZ","615-555-8050"
+"1613","Georganne F Furgason","Forrest RoadMiddx","93267-0942",70.0,"M","married","pensioner",24.0,"NO","NO","YES","NO",22831.00,101613,"CHECKING",NULL,"6220264390045758","515-555-1188","225-555-5673","ST",NULL
+"1614","Elsa Z Hargreaves","GarnantAmmanford","98350",31.0,"M","married","worker",4.0,"NO","YES","YES","NO",28.62,101614,"SAVINGS","ehargreaves@gmail.com",NULL,"808-555-637","803-555-2343","GU","elsa_h@t-online.de"
+"1614","Elsa Z Hargreaves","GarnantAmmanford","98350",31.0,"M","married","worker",4.0,"NO","YES","YES","NO",77.38,201614,"CHECKING","ehargreaves@gmail.com",NULL,"808-555-637","803-555-2343","GU","elsa_h@t-online.de"
+"1615","Emil B Willcock","STAFFORD PARK 4SHROPSHIRE","86406-6159",59.0,"M","single","worker",7.0,"YES","NO","NO","YES",7249.00,101615,"SAVINGS","emil_w@gmx.net","3528-3095-1856-2063","603-555-4379",NULL,"ST","502-555-2481"
+"1616","Roseanne H Tuttas","178/188 Great South West Road 178/188 Great South West Road Hounslow","27698-0001",65.0,"F","married","inactive",10.0,"NO","NO","NO","YES",6321.00,101616,"SAVINGS","roseanne.tuttas@gmx.net","3400 000000 00009","614-555-9752","401-555-2837","SN","tuttas@yahoo.com"
+"1617","Clifton Z Cary","HorsforthLeeds","98404-4508",91.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",4047.57,101617,"SAVINGS","clifton.cary@gmx.net","30210111161229","307-555-6129",NULL,"GA","505-555-9215"
+"1617","Clifton Z Cary","HorsforthLeeds","98404-4508",91.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",10943.43,201617,"CHECKING","clifton.cary@gmx.net","30210111161229","307-555-6129",NULL,"GA","505-555-9215"
+"1618","Benny F Kane","LympheHythe","80901",63.0,"M","married","pensioner",18.0,"NO","NO","YES","NO",81764.00,101618,"SAVINGS","benny_kane@blue.com","3530 1113 3330 0000","804-555-6063","385-555-1568","UG","775-555-1742"
+"1619","Chatherine N Sprung","AbergavennyGwent","32137-2415",33.0,"M","cohabitant","intermediate professions",8.0,"NO","NO","YES","YES",2456.00,101619,"CHECKING","chatherine_sprung@ccdef.net","5169799091854334","503-555-2264",NULL,"GP","sprung@cicn.gov"
+"1620","Seth W Bruns","DARTMOUTH ROADWEST MIDLANDS","85281-4961",60.0,"F","married","inactive",17.0,"NO","NO","YES","NO",6287.76,101620,"CHECKING","seth_bruns@blue.com","30310111161029","417-555-5100","808-555-6656","GE","seth.bruns@blue.com"
+"1620","Seth W Bruns","DARTMOUTH ROADWEST MIDLANDS","85281-4961",60.0,"F","married","inactive",17.0,"NO","NO","YES","NO",17000.23,201620,"SAVINGS","seth_bruns@blue.com","30310111161029","417-555-5100","808-555-6656","GE","seth.bruns@blue.com"
+"1621","Alberto Y Forrest","LympheHythe","30214-0604",71.0,"F","married","inactive",35.0,"YES","NO","NO","NO",-44.00,101621,"CHECKING","forrest@icloud.com","5285696282092972","302-555-3810","615-555-3144","GH","517-555-5462"
+"1622","Meir W Frizzell","Sanderson StreetSheffield","78028-2766",20.0,"M","single","inactive",1.0,"NO","NO","NO","YES",1735.00,101622,"CHECKING","frizzell@web.de",NULL,"334-555-2322",NULL,"MT","515-555-5669"
+"1623","Ron M Effinger","GoldthorpeRotherham","32206",63.0,"F","widowed","pensioner",20.0,"YES","NO","YES","YES",1436.94,101623,"CHECKING","ron_e@aol.com","5580977968891503","843-555-1834",NULL,"LA","601-555-1708"
+"1623","Ron M Effinger","GoldthorpeRotherham","32206",63.0,"F","widowed","pensioner",20.0,"YES","NO","YES","YES",3885.06,201623,"SAVINGS","ron_e@aol.com","5580977968891503","843-555-1834",NULL,"LA","601-555-1708"
+"1624","Dorsey T Barnett","----------Northampton","85260",37.0,"F","married","craftsmen, storekeepers",17.0,"NO","NO","NO","YES",6858.00,101624,"CHECKING","barnett@ibm.com","3530 1113 3330 0000","573-555-9135","405-555-3667","CV","barnett@gmail.com"
+"1625","Napoleon B Ackel","Berkswell RoadMeriden","802",72.0,"M","married","pensioner",16.0,"NO","NO","YES","YES",59778.00,101625,"CHECKING","napoleon.ackel@msn.com","5285696282092972","843-555-3225","208-555-9759","PT","804-555-558"
+"1626","Frances I Kerr","91/95 PEREGRINE ROADHAINAULT ILFORD ESSE","80901",23.0,"F","single","executives,self-employed",0.0,"NO","YES","NO","YES",628.02,101626,"CHECKING","fkerr@ccdef.net","213130951856200","916-555-9280","334-555-4432","UA","401-555-9719"
+"1626","Frances I Kerr","91/95 PEREGRINE ROADHAINAULT ILFORD ESSE","80901",23.0,"F","single","executives,self-employed",0.0,"NO","YES","NO","YES",1697.98,201626,"SAVINGS","fkerr@ccdef.net","213130951856200","916-555-9280","334-555-4432","UA","401-555-9719"
+"1627","Celes Y Torres","Headlands RoadLiversedge","53278-0001",26.0,"F","single","employee",2.0,"NO","NO","NO","YES",4186.00,101627,"SAVINGS","celes_torres@t-online.de","30111111161229","916-555-3705",NULL,"GB","617-555-1360"
+"1628","Maryann L Ehrenreich","Springhead Enterprise ParkNorthfleet","47111",57.0,"F","married","inactive",21.0,"NO","NO","YES","NO",4679.00,101628,"SAVINGS","maryann_e@ibm.com","4146 6643 9004 5458","602-555-3489","651-555-2609","NZ","614-555-8858"
+"1629","Brandon B Ho","--------------------","0",19.0,"M","single","inactive",3.0,"NO","YES","NO","YES",96.66,101629,"SAVINGS","ho@aol.com","4024 0071 2159 5481","609-555-2401",NULL,"DE",NULL
+"1629","Brandon B Ho","--------------------","0",19.0,"M","single","inactive",3.0,"NO","YES","NO","YES",261.34,201629,"CHECKING","ho@aol.com","4024 0071 2159 5481","609-555-2401",NULL,"DE",NULL
+"1630","Emily H Litthuanian","5 Warf LaneLeicester","36066",33.0,"M","married","worker",1.0,"NO","YES","NO","YES",1846.00,101630,"SAVINGS","emily_l@aol.com","3400 000000 00009","614-555-9051","334-555-7455","CV","208-555-3618"
+"1631","Joan C Hamilton","STAFFORD PARK 4SHROPSHIRE","78954-0089",26.0,"M","single","inactive",2.0,"NO","YES","NO","YES",4411.00,101631,"SAVINGS","hamilton@ibm.com","5285696282092972","307-555-2439","402-555-3912","PM","417-555-6540"
+"1632","Dana I Zerr","----------Bracknell","30024",61.0,"F","married","pensioner",11.0,"NO","NO","YES","NO",924.21,101632,"CHECKING","zerr@gmx.net","30310111161029","608-555-708","208-555-4948","TH","717-555-3869"
+"1632","Dana I Zerr","----------Bracknell","30024",61.0,"F","married","pensioner",11.0,"NO","NO","YES","NO",2498.79,201632,"SAVINGS","zerr@gmx.net","30310111161029","608-555-708","208-555-4948","TH","717-555-3869"
+"1633","Moise U Alparaz","Bishops StortfordHertfordshire","28677-5643",76.0,"F","married","inactive",0.0,"YES","NO","NO","NO",9990.00,101633,"CHECKING","moise_a@ccdef.net","5169-7990-9185-4334","843-555-5574","614-555-9230","PA","919-555-7729"
+"1634","Royce P Pixler","Halesfield 2Telford","80901",65.0,"F","married","inactive",28.0,"YES","NO","YES","NO",32778.00,101634,"CHECKING","royce_p@icloud.com","5423111111111111","404-555-2800",NULL,"PW","406-555-5274"
+"1635","Adriane J Lyons","WOODILEE INDUSTRIAL ESTATEGLASGOW","37209-1050",72.0,"F","married","inactive",17.0,"NO","NO","NO","NO",2294.73,101635,"CHECKING",NULL,"5169-7990-9185-4334","850-555-7966",NULL,"CI","360-555-3589"
+"1635","Adriane J Lyons","WOODILEE INDUSTRIAL ESTATEGLASGOW","37209-1050",72.0,"F","married","inactive",17.0,"NO","NO","NO","NO",6204.26,201635,"SAVINGS",NULL,"5169-7990-9185-4334","850-555-7966",NULL,"CI","360-555-3589"
+"1636","Bradley N Lopez","----------SWANSEA","33850-1447",36.0,"F","married","inactive",8.0,"NO","NO","YES","NO",10083.00,101636,"CHECKING","lopez@blue.com","3400-000000-00009","317-555-1239",NULL,"RE","785-555-2986"
+"1637","Gini Q Margeson","NEWCASTLE-UNDER-LYMESTAFFORDSHIRE ST5 9B","33027-2936",47.0,"F","married","inactive",16.0,"NO","NO","YES","NO",39770.00,101637,"CHECKING","gini_m@ibm.com","36111111111111","615-555-8877","860-555-8074","SZ","804-555-7416"
+"1638","Ye W Di Santo","Lower High StreetWednesbury","80901",88.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",14861.07,101638,"CHECKING","ysanto@aol.com","30011111111119","401-555-1658","303-555-3467","ET","512-555-8345"
+"1638","Ye W Di Santo","Lower High StreetWednesbury","80901",88.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",40179.93,201638,"SAVINGS","ysanto@aol.com","30011111111119","401-555-1658","303-555-3467","ET","512-555-8345"
+"1639","Bennie T Kroeplin","CRENDON STREETBUCKS","85750-9670",61.0,"M","married","worker",16.0,"NO","NO","NO","YES",28058.00,101639,"CHECKING","benniekroeplin@t-online.de",NULL,"850-555-4596",NULL,"HN","860-555-8655"
+"1640","Willie O Bower","North LaneAldershot","07606-1412",45.0,"M","married","craftsmen, storekeepers",0.0,"YES","YES","YES","YES",0.00,101640,"CHECKING","willie_bower@t-online.de","38111111111119","804-555-1555",NULL,"ES","904-555-5859"
+"1641","Donna I Crowder","69 MOLAND STRRETBIRMINGHAM","99523-2764",72.0,"F","widowed","inactive",18.0,"YES","NO","NO","NO",6201.90,101641,"SAVINGS","donna_c@msn.com","5580977968891503","701-555-8108","651-555-450","SB",NULL
+"1641","Donna I Crowder","69 MOLAND STRRETBIRMINGHAM","99523-2764",72.0,"F","widowed","inactive",18.0,"YES","NO","NO","NO",16768.10,201641,"CHECKING","donna_c@msn.com","5580977968891503","701-555-8108","651-555-450","SB",NULL
+"1642","Aziza F Mills","LOWER MILE HOUSE LANENEWCASTLE UNDER LYME","80901",37.0,"F","single","intermediate professions",18.0,"NO","NO","NO","NO",34385.00,101642,"SAVINGS","mills@ibm.com","4024007121595481","916-555-7933","860-555-2597","AS","601-555-2471"
+"1643","Whitney K Aleksintser","White Horse Business ParkTrowbridge","45331",61.0,"M","married","executives,self-employed",30.0,"NO","NO","NO","NO",7146.00,101643,"SAVINGS","whitney_a@yahoo.com","38111111111119","651-555-7514",NULL,"GL","405-555-8292"
+"1644","Ethem L Nylund","----------WARWICK","78028-4504",37.0,"F","married","worker",20.0,"NO","NO","YES","NO",59.13,101644,"SAVINGS","nylund@aol.com","340000000000009","202-555-7845","602-555-9917","RU","502-555-3421"
+"1644","Ethem L Nylund","----------WARWICK","78028-4504",37.0,"F","married","worker",20.0,"NO","NO","YES","NO",159.87,201644,"CHECKING","nylund@aol.com","340000000000009","202-555-7845","602-555-9917","RU","502-555-3421"
+"1645","Cammile G Affinito","55 London RoadSt Albans","32805-6182",20.0,"F","single","inactive",6.0,"NO","YES","NO","YES",3172.00,101645,"SAVINGS","cammile_affinito@t-online.de","5285696282092972","208-555-7514",NULL,"HU","affinito@gmx.net"
+"1646","Tracy K Dufault","KETTLES WOOD DRIVEBIRMINGHAM","85267-4944",76.0,"F","divorced","pensioner",11.0,"NO","NO","NO","YES",16825.00,101646,"CHECKING","dufault@t-online.de","4024 0071 2159 5481","505-555-2305",NULL,"BZ",NULL
+"1647","Bailey L Santillana","WittonBirmingham","91406-1923",26.0,"F","cohabitant","intermediate professions",1.0,"NO","NO","YES","NO",120.15,101647,"CHECKING",NULL,"213130951856200","317-555-1076","401-555-219","VN","417-555-8620"
+"1647","Bailey L Santillana","WittonBirmingham","91406-1923",26.0,"F","cohabitant","intermediate professions",1.0,"NO","NO","YES","NO",324.84,201647,"SAVINGS",NULL,"213130951856200","317-555-1076","401-555-219","VN","417-555-8620"
+"1648","Guss R Haberle","SmallfieldHorley","27587-9693",76.0,"F","widowed","inactive",5.0,"NO","NO","NO","NO",7398.00,101648,"CHECKING","ghaberle@ccdef.net","30411111111111","360-555-4128","804-555-2328","CU","919-555-1479"
+"1649","Theodor N Aguire","13Earlstrees Road CorbyNorthants NN 17 4NP","76011-7603",86.0,"M","married","pensioner",19.0,"YES","NO","YES","YES",11893.00,101649,"CHECKING","theodor_aguire@ccdef.net","36111111111111","302-555-9381","405-555-4839","YT","406-555-9128"
+"1650","Andy U Minarsky","AbergavennyGwent","33311-5603",76.0,"M","married","pensioner",20.0,"YES","NO","YES","NO",4794.66,101650,"CHECKING","minarsky@cicn.gov","5169799091854334","804-555-5514","804-555-8450","FJ","aminarsky@msl.org"
+"1650","Andy U Minarsky","AbergavennyGwent","33311-5603",76.0,"M","married","pensioner",20.0,"YES","NO","YES","NO",12963.34,201650,"SAVINGS","minarsky@cicn.gov","5169799091854334","804-555-5514","804-555-8450","FJ","aminarsky@msl.org"
+"1651","Warren W Granger","Birmingham RoadRedditch","33782-4704",79.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",102680.00,101651,"CHECKING","warren.granger@gmail.com","5580977968891503","843-555-8596","303-555-3586","GR","609-555-1382"
+"1652","Francezka K Mesa","Garstang Garstang Preston","28289-0241",20.0,"M","single","inactive",3.0,"NO","NO","NO","YES",2252.00,101652,"CHECKING","mesa@web.de","3528-3095-1856-2063","651-555-3697",NULL,"GI",NULL
+"1653","Felimon M Dale","Washington RoadSunderland","72204-2208",23.0,"M","single","employee",4.0,"NO","YES","NO","YES",9943.83,101653,"CHECKING","fdale@ccdef.net","4146-6643-9004-5458","919-555-6108","904-555-2464","PW","602-555-6054"
+"1653","Felimon M Dale","Washington RoadSunderland","72204-2208",23.0,"M","single","employee",4.0,"NO","YES","NO","YES",26885.17,201653,"SAVINGS","fdale@ccdef.net","4146-6643-9004-5458","919-555-6108","904-555-2464","PW","602-555-6054"
+"1654","Claude S Fitzgerald","----------Telford","77378-8793",53.0,"M","married","worker",15.0,"NO","NO","YES","NO",5579.00,101654,"CHECKING","claude_fitzgerald@aol.com","180030951856201","334-555-2940",NULL,"CU","401-555-60"
+"1655","Rhonda Y Heinen","GarstangPreston","22031",41.0,"F","widowed","employee",18.0,"YES","NO","NO","YES",2818.00,101655,"CHECKING",NULL,"30111111161229","601-555-4326",NULL,"DM","208-555-5730"
+"1656","Carryl Q Pash","King StreetBedworth","33311",72.0,"M","married","inactive",13.0,"NO","NO","YES","YES",1172.88,101656,"CHECKING","carryl.pash@aol.com","6220264390045758","503-555-8214","775-555-6116","NZ","225-555-5731"
+"1656","Carryl Q Pash","King StreetBedworth","33311",72.0,"M","married","inactive",13.0,"NO","NO","YES","YES",3171.12,201656,"SAVINGS","carryl.pash@aol.com","6220264390045758","503-555-8214","775-555-6116","NZ","225-555-5731"
+"1657","Maury C Graff","Green LaneHeywood","98006-2121",59.0,"M","single","farmer",11.0,"NO","NO","NO","YES",46204.00,101657,"CHECKING","graff@aol.com","5169799091854334","505-555-1172",NULL,"CN","334-555-423"
+"1658","Norio P Munsil","WittonBirmingham","98324-0296",42.0,"M","single","worker",20.0,"NO","NO","NO","NO",17262.00,101658,"CHECKING","norio_munsil@gmx.net","3400-000000-00009","775-555-5550",NULL,"MW","916-555-2023"
+"1659","Sam U Radunsky","Hall GreenBirmingham","77036-7418",23.0,"M","single","inactive",7.0,"NO","YES","NO","YES",-46.71,101659,"CHECKING","sam_radunsky@ccdef.net","4146664390045458","608-555-7561","405-555-6650","JO","502-555-219"
+"1659","Sam U Radunsky","Hall GreenBirmingham","77036-7418",23.0,"M","single","inactive",7.0,"NO","YES","NO","YES",-126.28,201659,"SAVINGS","sam_radunsky@ccdef.net","4146664390045458","608-555-7561","405-555-6650","JO","502-555-219"
+"1660","Reid I Cavins","FACTORY ROADUPTON - POOLE - DORS","86301-3214",40.0,"M","married","worker",21.0,"NO","YES","YES","YES",2590.00,101660,"CHECKING","cavins@blue.com","378282246310005","850-555-2345","208-555-444","AM","651-555-7387"
+"1661","Pam A Cehula","RIVERSIDE WAYCAMBERLEY SURREY GU","34104",26.0,"M","single","inactive",7.0,"NO","NO","NO","YES",27354.00,101661,"SAVINGS","pam_c@aol.com","38111111111119","919-555-8939",NULL,"OM","207-555-8459"
+"1662","Skeets A Chachanashvili","Merse RoadRedditch","22031",46.0,"F","married","intermediate professions",23.0,"YES","NO","YES","NO",205.74,101662,"SAVINGS","skeets_chachanashvili@gmx.net",NULL,"334-555-6146",NULL,"DZ","512-555-1848"
+"1662","Skeets A Chachanashvili","Merse RoadRedditch","22031",46.0,"F","married","intermediate professions",23.0,"YES","NO","YES","NO",556.26,201662,"CHECKING","skeets_chachanashvili@gmx.net",NULL,"334-555-6146",NULL,"DZ","512-555-1848"
+"1663","Marc L Huez","STAFFORD PARK 4SHROPSHIRE","27604",64.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",20585.00,101663,"SAVINGS","marc_huez@ibm.com","6520224090045455","701-555-9617","775-555-3421","CX","huez@de.ibm.com"
+"1664","Kenneth K Hewitt","RAMSBOTTOMLANCS.","99523-2764",21.0,"F","single","inactive",5.0,"NO","NO","NO","YES",1229.00,101664,"SAVINGS","kenneth.hewitt@web.de","5520111111111121","617-555-4993","614-555-295","BD","401-555-5112"
+"1665","Dick I Eisner","Hail WestonCAMBS","98580",76.0,"F","widowed","pensioner",20.0,"YES","NO","NO","YES",6949.26,101665,"SAVINGS","eisner@web.de","6220264390045758","603-555-24","808-555-6445","SI","601-555-4159"
+"1665","Dick I Eisner","Hail WestonCAMBS","98580",76.0,"F","widowed","pensioner",20.0,"YES","NO","NO","YES",18788.73,201665,"CHECKING","eisner@web.de","6220264390045758","603-555-24","808-555-6445","SI","601-555-4159"
+"1666","Jacqueline W Madsen","IDOTTSVAGEN 7SWEDEN","71134",25.0,"M","single","inactive",8.0,"NO","YES","NO","YES",3566.00,101666,"CHECKING","jacqueline_m@cicn.gov","3400 000000 00009","843-555-9296",NULL,"MW","860-555-8376"
+"1667","Jerald Z Allen","Hail WestonCAMBS","75238-1395",82.0,"F","widowed","inactive",12.0,"NO","NO","YES","NO",13455.00,101667,"CHECKING","jerald_allen@msn.com","6220264390045758","803-555-3807",NULL,"MZ","allen@aol.com"
+"1668","Myrna A Pham","Canal RoadLeeds","19440-1557",69.0,"M","single","pensioner",26.0,"NO","NO","NO","NO",6438.15,101668,"CHECKING","mpham@ibm.com","5580977968891503","360-555-7012",NULL,"CD","202-555-9480"
+"1668","Myrna A Pham","Canal RoadLeeds","19440-1557",69.0,"M","single","pensioner",26.0,"NO","NO","NO","NO",17406.85,201668,"SAVINGS","mpham@ibm.com","5580977968891503","360-555-7012",NULL,"CD","202-555-9480"
+"1669","Hurston D Eisele","Tame RoadBirmingham","75091-2146",33.0,"M","married","worker",12.0,"NO","YES","YES","NO",2565.00,101669,"CHECKING","hurston.eisele@t-online.de","5423111111111111","402-555-9571","518-555-8763","NA","hurston.eisele@icloud.com"
+"1670","Darryl J Morey","5 Oxford RoadNewbury","86401-9440",77.0,"F","married","inactive",19.0,"NO","NO","YES","NO",165821.00,101670,"SAVINGS","darryl_m@web.de","4024 0071 2159 5481","904-555-7968",NULL,"EC","302-555-9551"
+"1671","Misty X Hasten","WATCHMEADHERTFORDSHIRE","29512",76.0,"M","separated","pensioner",37.0,"NO","NO","NO","NO",1929.69,101671,"SAVINGS","misty_h@yahoo.com",NULL,"860-555-8075",NULL,"CA","609-555-4242"
+"1671","Misty X Hasten","WATCHMEADHERTFORDSHIRE","29512",76.0,"M","separated","pensioner",37.0,"NO","NO","NO","NO",5217.30,201671,"CHECKING","misty_h@yahoo.com",NULL,"860-555-8075",NULL,"CA","609-555-4242"
+"1754","Shelley I Baxter","LlantrisantPontyclun","75381-9060",47.0,"F","single","inactive",3.0,"NO","YES","NO","YES",-5.00,101754,"CHECKING","shelleybaxter@gmx.net","5462522444922689","785-555-1261",NULL,"RO","775-555-5290"
+"1672","Gilmore W Beil","Small HeathBirmingham","98324",48.0,"F","divorced","worker",8.0,"NO","NO","YES","NO",2677.00,101672,"SAVINGS","gilmore_b@icloud.com","4024007121595481","614-555-7414","601-555-7793","YE","303-555-553"
+"1673","Philippe X Kunze","17 Victoria RoadStaffs","85242-4092",54.0,"M","married","inactive",27.0,"NO","NO","YES","NO",43933.00,101673,"SAVINGS","philippe_kunze@msl.org","3528-3095-1856-2063","850-555-1909","307-555-7847","HK","303-555-8095"
+"1674","Yvette Q Bouchard","1 CHURCH ROWKENT","67846-5669",68.0,"F","married","inactive",2.0,"NO","NO","YES","YES",28300.59,101674,"SAVINGS","ybouchard@msl.org","4146 6643 9004 5458","512-555-5411",NULL,"FX","bouchard@web.de"
+"1674","Yvette Q Bouchard","1 CHURCH ROWKENT","67846-5669",68.0,"F","married","inactive",2.0,"NO","NO","YES","YES",76516.41,201674,"CHECKING","ybouchard@msl.org","4146 6643 9004 5458","512-555-5411",NULL,"FX","bouchard@web.de"
+"1675","Cleaburne M Amos","Guildhall LaneLeicester","77356",40.0,"M","separated","worker",7.0,"NO","NO","NO","YES",3438.00,101675,"SAVINGS",NULL,"3400 000000 00009","617-555-4587","907-555-9662","SI","605-555-7071"
+"1676","Zipporah N Lucey","FOUNTAYNE ROADLONDON","86324-2661",26.0,"M","single","inactive",7.0,"YES","NO","NO","YES",20015.00,101676,"SAVINGS","zipporah_l@web.de","4146664390045458","515-555-9258",NULL,"TD","603-555-2008"
+"1677","Herbert D Westling","Brickyard RoadWalsall","80911",27.0,"M","single","worker",3.0,"NO","YES","NO","YES",62.37,101677,"SAVINGS","westling@cicn.gov","5383908528354962","808-555-7358","651-555-7704","SA",NULL
+"1677","Herbert D Westling","Brickyard RoadWalsall","80911",27.0,"M","single","worker",3.0,"NO","YES","NO","YES",168.63,201677,"CHECKING","westling@cicn.gov","5383908528354962","808-555-7358","651-555-7704","SA",NULL
+"1678","Lauren K Kelley","Boyatt WoodEastleigh","98055-2307",72.0,"F","widowed","pensioner",22.0,"NO","NO","NO","NO",41096.00,101678,"CHECKING","lauren_kelley@t-online.de","3528-3095-1856-2063","208-555-899",NULL,"LS","lauren_kelley@de.ibm.com"
+"1679","Kameron N Aman","5 Warf LaneLeicester","86340",68.0,"F","married","pensioner",21.0,"NO","NO","YES","YES",15124.00,101679,"CHECKING","kaman@gmail.com","4024 0071 2159 5481","717-555-1632",NULL,"LY","417-555-2521"
+"1680","Wes X Wohl","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27889-7204",70.0,"F","widowed","inactive",10.0,"NO","NO","NO","YES",394.20,101680,"CHECKING","wes.wohl@msn.com","5520111111111121","802-555-1338","505-555-3217","NE","334-555-764"
+"1680","Wes X Wohl","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27889-7204",70.0,"F","widowed","inactive",10.0,"NO","NO","NO","YES",1065.80,201680,"SAVINGS","wes.wohl@msn.com","5520111111111121","802-555-1338","505-555-3217","NE","334-555-764"
+"1681","Carlo T Farabee","Nottingham RoadBelper","85706-4911",72.0,"F","widowed","pensioner",13.0,"YES","NO","NO","NO",80199.00,101681,"CHECKING","carlo_f@yahoo.com","4146-6643-9004-5458","360-555-2151",NULL,"NF","317-555-4136"
+"1682","Phoumy E Hughes","Sharston RoadManchester","86303-5569",76.0,"F","married","inactive",23.0,"YES","NO","NO","NO",62200.00,101682,"CHECKING","hughes@web.de","3400 000000 00009","785-555-7106",NULL,"BZ","512-555-6295"
+"1683","Rosalie Z Dave","Benton LaneNEWCASTLE Upon Tyne","78028-4811",62.0,"M","married","pensioner",22.0,"NO","YES","YES","YES",497.07,101683,"CHECKING","rosaliedave@yahoo.com","30210111161229","601-555-3070",NULL,"SK","207-555-3619"
+"1683","Rosalie Z Dave","Benton LaneNEWCASTLE Upon Tyne","78028-4811",62.0,"M","married","pensioner",22.0,"NO","YES","YES","YES",1343.93,201683,"SAVINGS","rosaliedave@yahoo.com","30210111161229","601-555-3070",NULL,"SK","207-555-3619"
+"1684","Min M Broda","35 Livery StreetBirmingham","75397-1342",75.0,"F","widowed","inactive",28.0,"YES","NO","NO","NO",24108.00,101684,"SAVINGS","broda@gmx.net","30210111161229","505-555-2090","804-555-8083","LR","916-555-6559"
+"1685","Garnet W Neyens","148 Edmund StreetBirmingham","33441-2199",24.0,"M","single","inactive",14.0,"YES","NO","NO","YES",6396.00,101685,"SAVINGS","garnet_n@msl.org","213130951856200","603-555-2129","401-555-3399","IE","503-555-8584"
+"1686","Alfredo H Howell","10 Mordaunt RoadLondon","98382-7456",36.0,"M","married","executives,self-employed",5.0,"NO","YES","YES","YES",1724.49,101686,"SAVINGS","howell@yahoo.com","213130951856200","843-555-1537","860-555-4998","IS","512-555-5097"
+"1686","Alfredo H Howell","10 Mordaunt RoadLondon","98382-7456",36.0,"M","married","executives,self-employed",5.0,"NO","YES","YES","YES",4662.51,201686,"CHECKING","howell@yahoo.com","213130951856200","843-555-1537","860-555-4998","IS","512-555-5097"
+"1687","Winford B Flin","SmallfieldHorley","85635",64.0,"M","single","pensioner",28.0,"YES","NO","NO","NO",13838.00,101687,"SAVINGS","winford_flin@msl.org","5462522444922689","785-555-5819",NULL,"CR","flin@de.ibm.com"
+"1688","Brantley V Hemmingson","HatfieldHertfordshire","80917-5710",43.0,"F","single","employee",7.0,"NO","YES","NO","YES",-90.00,101688,"SAVINGS","hemmingson@ibm.com","38111111111119","843-555-7210",NULL,"MA","brantley_hemmingson@blue.com"
+"1689","Lou J Kindred","Church RoadBristol","33803-8359",20.0,"M","single","inactive",3.0,"NO","NO","NO","NO",238.14,101689,"SAVINGS","lou_kindred@t-online.de","5580977968891503","804-555-5723",NULL,"KP","502-555-8345"
+"1689","Lou J Kindred","Church RoadBristol","33803-8359",20.0,"M","single","inactive",3.0,"NO","NO","NO","NO",643.86,201689,"CHECKING","lou_kindred@t-online.de","5580977968891503","804-555-5723",NULL,"KP","502-555-8345"
+"1690","Klaus S Teraberry","KING STREETWARWICKSHIRE","33912",56.0,"M","single","farmer",15.0,"NO","NO","YES","NO",74017.00,101690,"SAVINGS","teraberry@yahoo.com","38111111111119","603-555-8467","601-555-1326","DJ","klaus_teraberry@ibm.com"
+"1691","Tilly U Mcgill","224 Marsh HillBirmingham","32703-8504",17.0,"M","child","inactive",1.0,"NO","NO","NO","YES",-643.00,101691,"SAVINGS","tmcgill@ccdef.net","30210111161229","605-555-7316",NULL,"SL","605-555-7275"
+"1692","Micahel X Jesten","STAFFORD PARK 15TELFORD","98365-9634",42.0,"F","married","intermediate professions",24.0,"NO","NO","YES","YES",3461.13,101692,"SAVINGS","jesten@ccdef.net",NULL,"603-555-4028","651-555-3281","BV","602-555-9419"
+"1692","Micahel X Jesten","STAFFORD PARK 15TELFORD","98365-9634",42.0,"F","married","intermediate professions",24.0,"NO","NO","YES","YES",9357.86,201692,"CHECKING","jesten@ccdef.net",NULL,"603-555-4028","651-555-3281","BV","602-555-9419"
+"1693","Rupert U Bagley","Industrial Est.Witney","70809",34.0,"F","married","inactive",4.0,"NO","YES","NO","YES",2272.00,101693,"CHECKING","rbagley@ibm.com","36111111111111","802-555-4754",NULL,"KY","208-555-7159"
+"1694","Klaus T Mcnabb","WittonBirmingham","77282-0285",62.0,"M","married","pensioner",13.0,"NO","NO","YES","NO",19797.00,101694,"CHECKING","klaus_mcnabb@ibm.com","30011111111119","208-555-1005","717-555-7379","MC","808-555-2337"
+"1695","Julius I Schmelzer","WittonBirmingham","30030",68.0,"F","married","inactive",18.0,"YES","NO","YES","NO",3044.25,101695,"CHECKING","jschmelzer@yahoo.com","30011111111119","404-555-5053","302-555-3147","SE","701-555-799"
+"1695","Julius I Schmelzer","WittonBirmingham","30030",68.0,"F","married","inactive",18.0,"YES","NO","YES","NO",8230.75,201695,"SAVINGS","jschmelzer@yahoo.com","30011111111119","404-555-5053","302-555-3147","SE","701-555-799"
+"1696","Suzan E Flakes","STAND PARK / SHEFFIELD ROAD STAND PARK / SHEFFIELD ROAD CHESTERFIELD DERBY","85251-1255",85.0,"M","married","pensioner",29.0,"NO","NO","YES","NO",42275.00,101696,"CHECKING","suzan_f@aol.com","5169-7990-9185-4334","503-555-4390",NULL,"MA","360-555-7513"
+"1697","Terri B Duvall","Birmingham RoadRedditch","32819-8499",68.0,"F","widowed","inactive",20.0,"YES","NO","NO","NO",2135.00,101697,"CHECKING","tduvall@ibm.com","3530 1113 3330 0000","916-555-8279",NULL,"ZM","601-555-3680"
+"1698","Debbie P Mcgee","STOCKPORTENGLAND","98382-8005",66.0,"F","married","farmer",29.0,"YES","NO","YES","NO",2756.97,101698,"CHECKING","debbie_mcgee@de.ibm.com","5423111111111111","615-555-2012","505-555-6987","GI","517-555-1428"
+"1698","Debbie P Mcgee","STOCKPORTENGLAND","98382-8005",66.0,"F","married","farmer",29.0,"YES","NO","YES","NO",7454.03,201698,"SAVINGS","debbie_mcgee@de.ibm.com","5423111111111111","615-555-2012","505-555-6987","GI","517-555-1428"
+"1699","Homer R Magliozzi","WEST WORLD WESTGATELONDON","1441",24.0,"M","single","employee",8.0,"NO","YES","NO","YES",8625.00,101699,"SAVINGS","homer_m@gmx.net","4146664390045458",NULL,NULL,"VU","602-555-5456"
+"1700","Stuart H Schenbeck","CLARKE STREETDERBY","29577",55.0,"M","married","farmer",26.0,"NO","NO","YES","NO",38095.00,101700,"SAVINGS","schenbeck@msl.org","5169 7990 9185 4334","401-555-2836","904-555-3078","AL","stuart_s@msl.org"
+"1701","Shan L Leavitt","Hail WestonCAMBS","87571-1104",40.0,"F","married","inactive",12.0,"NO","YES","YES","YES",-43.20,101701,"SAVINGS","leavitt@ibm.com","5285696282092972","360-555-8096",NULL,"PY",NULL
+"1701","Shan L Leavitt","Hail WestonCAMBS","87571-1104",40.0,"F","married","inactive",12.0,"NO","YES","YES","YES",-116.80,201701,"CHECKING","leavitt@ibm.com","5285696282092972","360-555-8096",NULL,"PY",NULL
+"1702","Jules T Harper","Sherbourne DriveTilbrook Milton Keyn","37912-1397",44.0,"M","married","employee",20.0,"NO","NO","YES","NO",7678.00,101702,"SAVINGS","harper@ccdef.net","5169-7990-9185-4334","850-555-6779",NULL,"BA","503-555-9815"
+"1703","Edmund H Snow","9 Coneygre Industrial EstateTipton","29414-6484",50.0,"F","married","employee",14.0,"NO","YES","YES","NO",13007.00,101703,"SAVINGS","snow@icloud.com","3528-3095-1856-2063","406-555-7530","208-555-1203","AE","208-555-9087"
+"1704","Romulo V Wegener","WythenshaweManchester","29116-1143",67.0,"F","widowed","pensioner",16.0,"NO","NO","NO","YES",4071.60,101704,"SAVINGS","romulo.wegener@icloud.com","5383908528354962","617-555-4132","515-555-6446","SK","317-555-4815"
+"1704","Romulo V Wegener","WythenshaweManchester","29116-1143",67.0,"F","widowed","pensioner",16.0,"NO","NO","NO","YES",11008.40,201704,"CHECKING","romulo.wegener@icloud.com","5383908528354962","617-555-4132","515-555-6446","SK","317-555-4815"
+"1705","Janette H Nelsen","DentonManchester","32304",63.0,"F","married","pensioner",21.0,"NO","NO","YES","NO",19139.00,101705,"SAVINGS","janette_n@de.ibm.com","38111111111119","605-555-5773","502-555-1821","SA","401-555-6729"
+"1706","Myra S Moshir","Wigston Wigston Leicester","33773-5315",80.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",58045.00,101706,"SAVINGS","myra_moshir@t-online.de","3528-3095-1856-2063","517-555-7981",NULL,"RE","802-555-4327"
+"1707","Jon H Bickmore","SpondonDerby","77055-1208",77.0,"F","widowed","pensioner",17.0,"YES","NO","NO","YES",6166.80,101707,"SAVINGS","bickmore@web.de","4024007121595481","517-555-2795",NULL,"CN","614-555-5144"
+"1707","Jon H Bickmore","SpondonDerby","77055-1208",77.0,"F","widowed","pensioner",17.0,"YES","NO","NO","YES",16673.20,201707,"CHECKING","bickmore@web.de","4024007121595481","517-555-2795",NULL,"CN","614-555-5144"
+"1708","Rolly Z Stanske","69 MOLAND STRRETBIRMINGHAM","98362",64.0,"M","divorced","inactive",14.0,"NO","NO","NO","YES",4025.00,101708,"SAVINGS","rolly_s@ibm.com","4146664390045458","334-555-3650",NULL,"RE","404-555-4909"
+"1709","Mohammad Y Alcain","HAY HILL ROADBIRMINGHAM","72947-8504",26.0,"F","single","inactive",3.0,"NO","YES","NO","YES",-1204.00,101709,"SAVINGS","malcain@msl.org","5169-7990-9185-4334","503-555-6216","405-555-2128","NR",NULL
+"1710","Alva A Binkley","KETTLES WOOD DRIVEBIRMINGHAM","78070",82.0,"M","widowed","pensioner",23.0,"YES","NO","NO","NO",2609.01,101710,"SAVINGS","abinkley@icloud.com","30210111161229","417-555-7400",NULL,"US",NULL
+"1710","Alva A Binkley","KETTLES WOOD DRIVEBIRMINGHAM","78070",82.0,"M","widowed","pensioner",23.0,"YES","NO","NO","NO",7053.99,201710,"CHECKING","abinkley@icloud.com","30210111161229","417-555-7400",NULL,"US",NULL
+"1711","Trevor V Rodie","Leabrook RoadWEST MIDLANDS","32608-4748",50.0,"F","married","employee",26.0,"NO","NO","YES","NO",35930.00,101711,"SAVINGS","trevor_r@cicn.gov",NULL,"775-555-5151",NULL,"MZ","651-555-6501"
+"1712","Reva Q Schartow","MOUNTBATTEN HOUSE BASING VIEW MOUNTBATTEN HOUSE BASING VIEW BASINGSTOKE","85383-3236",67.0,"F","married","inactive",24.0,"NO","NO","YES","NO",271033.00,101712,"SAVINGS",NULL,"4024007121595481","617-555-3508","515-555-9401","DJ","reva_s@icloud.com"
+"1713","John Y Gerkin","White Horse Business ParkTrowbridge","46266",65.0,"F","married","inactive",36.0,"YES","NO","YES","YES",11958.03,101713,"SAVINGS","john_g@aol.com","3528-3095-1856-2063","603-555-2429",NULL,"PY","john_gerkin@aol.com"
+"1713","John Y Gerkin","White Horse Business ParkTrowbridge","46266",65.0,"F","married","inactive",36.0,"YES","NO","YES","YES",32330.96,201713,"CHECKING","john_g@aol.com","3528-3095-1856-2063","603-555-2429",NULL,"PY","john_gerkin@aol.com"
+"1714","Michele B Friesen","RAMSBOTTOMLANCS.","85374-7038",30.0,"M","married","employee",3.0,"NO","NO","YES","NO",731.00,101714,"CHECKING","michele.friesen@msn.com","36111111111111","517-555-2630","406-555-7504","MC","402-555-2323"
+"1715","Pedro N Wang","WimborneDorset","28787-9219",47.0,"F","married","inactive",23.0,"NO","NO","YES","NO",2561.00,101715,"CHECKING","wang@blue.com","3400-000000-00009","307-555-8436",NULL,"MK","651-555-1077"
+"1716","Hunkyi A Cantrell","WELWYN ELECTRONICS PARK BEDLINNORTHUMBERLAND","29402-0568",22.0,"F","single","inactive",6.0,"NO","NO","NO","YES",1835.46,101716,"CHECKING","cantrell@yahoo.com","378282246310005","385-555-4751",NULL,"GP","605-555-6337"
+"1716","Hunkyi A Cantrell","WELWYN ELECTRONICS PARK BEDLINNORTHUMBERLAND","29402-0568",22.0,"F","single","inactive",6.0,"NO","NO","NO","YES",4962.54,201716,"SAVINGS","cantrell@yahoo.com","378282246310005","385-555-4751",NULL,"GP","605-555-6337"
+"1717","Elvira N Wetzel","StirchleyBirmingham","77084-2815",27.0,"M","single","inactive",15.0,"YES","NO","YES","NO",7644.00,101717,"CHECKING","wetzel@ccdef.net","36111111111111","850-555-4882",NULL,"SV","517-555-5647"
+"1718","Gene H Torley","5 Warf LaneLeicester","77092-7028",71.0,"M","married","inactive",26.0,"NO","NO","NO","NO",13928.00,101718,"CHECKING","torley@msn.com","3400 000000 00009","405-555-5966",NULL,"AW","334-555-9131"
+"1719","Roselee D Mesa","Stretford Stretford Manchester","34104-7017",43.0,"F","married","farmer",23.0,"YES","NO","YES","NO",14454.72,101719,"SAVINGS","roselee_m@yahoo.com","5462522444922689","614-555-3853",NULL,"CX","401-555-4700"
+"1719","Roselee D Mesa","Stretford Stretford Manchester","34104-7017",43.0,"F","married","farmer",23.0,"YES","NO","YES","NO",39081.28,201719,"CHECKING","roselee_m@yahoo.com","5462522444922689","614-555-3853",NULL,"CX","401-555-4700"
+"1720","Meir N Wightman","Great Western WaySwindon","83864-6219",66.0,"M","married","employee",24.0,"YES","NO","YES","NO",75037.00,101720,"SAVINGS",NULL,"30411111111111","302-555-1982",NULL,"AG","808-555-2294"
+"1721","Carrine D Brigham","NorthgateAldridge","33527",32.0,"M","single","inactive",8.0,"NO","YES","NO","YES",265.00,101721,"SAVINGS","carrine_b@icloud.com",NULL,"202-555-2158",NULL,"MX","carrine_brigham@msl.org"
+"1722","Shawn O Arciga","105 Devonshire RoadLondon","31193-2134",43.0,"M","married","worker",10.0,"NO","NO","NO","YES",9997.56,101722,"SAVINGS","shawn_a@aol.com","4024-0071-2159-5481","802-555-6965",NULL,"MC","608-555-1447"
+"1722","Shawn O Arciga","105 Devonshire RoadLondon","31193-2134",43.0,"M","married","worker",10.0,"NO","NO","NO","YES",27030.44,201722,"CHECKING","shawn_a@aol.com","4024-0071-2159-5481","802-555-6965",NULL,"MC","608-555-1447"
+"1723","Rupert X Baumgartner","Talisman RoadBicester Oxon OX6 6J","34994-3953",85.0,"M","married","pensioner",26.0,"NO","NO","YES","NO",21086.00,101723,"SAVINGS","baumgartner@de.ibm.com","5169 7990 9185 4334","907-555-8024","907-555-9916","KE","515-555-103"
+"1724","Merle Q Lamb","Arlington Way Sundorne Retail Arlington Way Sundorne Retail Shrewsbury","63111",68.0,"M","married","pensioner",26.0,"YES","NO","NO","YES",36573.00,101724,"SAVINGS",NULL,"5383908528354962","904-555-4761","916-555-7446","SK",NULL
+"1725","Jesse T Drake","----------Kingswinford","86336-3804",73.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",10480.05,101725,"SAVINGS",NULL,"3400 000000 00009","907-555-4025",NULL,"KW","916-555-1472"
+"1725","Jesse T Drake","----------Kingswinford","86336-3804",73.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",28334.95,201725,"CHECKING",NULL,"3400 000000 00009","907-555-4025",NULL,"KW","916-555-1472"
+"1726","Lucas I Fraser","----------WOOTTON BASSET","98324",68.0,"M","single","pensioner",26.0,"YES","NO","NO","YES",15522.00,101726,"SAVINGS","lfraser@ccdef.net","30411111111111","360-555-4682","385-555-8153","HK","404-555-778"
+"1727","Amber H Fonder","9 Coneygre Industrial EstateTipton","29340-6536",50.0,"M","single","employee",11.0,"NO","NO","NO","NO",132770.00,101727,"SAVINGS","amber_fonder@de.ibm.com","30411111111111","615-555-432","518-555-2808","LS","208-555-2334"
+"1728","Easter W Cessna","HAY HILL ROADBIRMINGHAM","86001-6296",72.0,"F","married","inactive",26.0,"NO","NO","YES","NO",7718.76,101728,"CHECKING","cessna@gmx.net","340000000000009","603-555-2276","609-555-2190","SO",NULL
+"1728","Easter W Cessna","HAY HILL ROADBIRMINGHAM","86001-6296",72.0,"F","married","inactive",26.0,"NO","NO","YES","NO",20869.23,201728,"SAVINGS","cessna@gmx.net","340000000000009","603-555-2276","609-555-2190","SO",NULL
+"1729","Jana A Phillip","----------CHICHESTER WEST SUSS","77868-3732",34.0,"M","married","employee",9.0,"NO","NO","NO","NO",23403.00,101729,"CHECKING","jphillip@cicn.gov","4024 0071 2159 5481","417-555-6307",NULL,"YE","317-555-6945"
+"1730","Georganne U Asbahr","FACTORY ROADUPTON - POOLE - DORS","30340-1452",75.0,"F","widowed","pensioner",37.0,"NO","NO","NO","NO",9112.00,101730,"CHECKING","asbahr@aol.com","4146-6643-9004-5458","401-555-3063",NULL,"SI","916-555-5956"
+"1731","Namil R Weir","StirchleyBirmingham","32609-3323",77.0,"F","married","inactive",9.0,"YES","NO","YES","NO",8183.43,101731,"CHECKING","weir@gmx.net","3528309518562063","404-555-7490",NULL,"LY","518-555-7548"
+"1731","Namil R Weir","StirchleyBirmingham","32609-3323",77.0,"F","married","inactive",9.0,"YES","NO","YES","NO",22125.57,201731,"SAVINGS","weir@gmx.net","3528309518562063","404-555-7490",NULL,"LY","518-555-7548"
+"1732","Gardner O Acquaro","BRAC CREDIT INTERNATIONALHERTS","33102-5115",72.0,"F","widowed","pensioner",4.0,"NO","NO","NO","YES",4504.00,101732,"CHECKING","gardner.acquaro@aol.com","5169-7990-9185-4334","512-555-4064","202-555-9398","MP",NULL
+"1733","Sergio Q Tandy","Bishops StortfordHertfordshire","85082-7983",20.0,"M","single","worker",2.0,"NO","NO","NO","YES",2134.00,101733,"CHECKING","standy@aol.com","6520224090045455","405-555-7798",NULL,"ML","775-555-5788"
+"1734","Lamont R Soucy","----------Worthing","30029",20.0,"F","single","inactive",1.0,"NO","NO","NO","YES",1386.18,101734,"CHECKING","lamont_soucy@gmx.net","5580977968891503","402-555-6474",NULL,"GA","651-555-6097"
+"1734","Lamont R Soucy","----------Worthing","30029",20.0,"F","single","inactive",1.0,"NO","NO","NO","YES",3747.81,201734,"SAVINGS","lamont_soucy@gmx.net","5580977968891503","402-555-6474",NULL,"GA","651-555-6097"
+"1735","Eva D Fontes","WythenshaweManchester","77092-7028",21.0,"F","single","inactive",4.0,"NO","YES","NO","YES",5605.00,101735,"CHECKING","evafontes@msl.org","180030951856201","501-555-1875",NULL,"PH","417-555-8433"
+"1736","Liam Q Lemuel","58/59 Lower High StreetWest Midlands","33755-1008",44.0,"M","single","worker",23.0,"NO","NO","NO","YES",24924.00,101736,"CHECKING","lemuel@yahoo.com","5169799091854334","804-555-8311",NULL,"BT","608-555-1769"
+"1737","Steven W Liedahl","Barton BlountChurch Broughton","40290",44.0,"F","married","employee",28.0,"NO","NO","YES","NO",1248.21,101737,"CHECKING","liedahl@msn.com",NULL,"385-555-5567","603-555-3180","BG","904-555-8114"
+"1737","Steven W Liedahl","Barton BlountChurch Broughton","40290",44.0,"F","married","employee",28.0,"NO","NO","YES","NO",3374.79,201737,"SAVINGS","liedahl@msn.com",NULL,"385-555-5567","603-555-3180","BG","904-555-8114"
+"1738","Gene C Gregory","DO NOT MAILKenilworth","34688-5000",66.0,"F","married","worker",23.0,"NO","NO","NO","YES",29602.00,101738,"CHECKING","gregory@web.de","6520224090045455","406-555-1780",NULL,"PK","208-555-8645"
+"1739","Fergus V Fiore","3-5 swallow placwMayfairLondon","59930-0100",59.0,"M","single","farmer",23.0,"YES","NO","NO","YES",40225.00,101739,"CHECKING","ffiore@msl.org","30011111111119","307-555-2864","517-555-4312","PR","417-555-7930"
+"1740","Jayne L Severson","HestonHounslow","34230",19.0,"M","single","inactive",5.0,"YES","YES","NO","YES",-7.29,101740,"CHECKING","jayne.severson@gmx.net","30411111111111","517-555-5465",NULL,"LR","401-555-5197"
+"1740","Jayne L Severson","HestonHounslow","34230",19.0,"M","single","inactive",5.0,"YES","YES","NO","YES",-19.71,201740,"SAVINGS","jayne.severson@gmx.net","30411111111111","517-555-5465",NULL,"LR","401-555-5197"
+"1741","Mortique X Parente","Perry BarrBirmingham","77619",52.0,"F","married","inactive",4.0,"NO","YES","NO","YES",1298.00,101741,"CHECKING",NULL,"5520111111111121","307-555-534","808-555-5093","IE","317-555-2244"
+"1742","Sherwood O Gray","Arlington Way Sundorne RetailShrewsbury","30076-3738",50.0,"F","married","worker",23.0,"NO","NO","YES","NO",33409.00,101742,"SAVINGS","sherwood_g@icloud.com","30111111161229","717-555-5545",NULL,"FI","sherwood_gray@msl.org"
+"1743","Mai M Reider","MiddletonManchester","33811-1293",64.0,"F","married","inactive",13.0,"YES","NO","YES","NO",3452.49,101743,"SAVINGS","reider@msn.com","4024-0071-2159-5481","808-555-5028","804-555-51","SZ","515-555-9021"
+"1743","Mai M Reider","MiddletonManchester","33811-1293",64.0,"F","married","inactive",13.0,"YES","NO","YES","NO",9334.51,201743,"CHECKING","reider@msn.com","4024-0071-2159-5481","808-555-5028","804-555-51","SZ","515-555-9021"
+"1744","Newton C Dodson","Halesfield.2Shropshire","77092-7028",76.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",12074.00,101744,"SAVINGS","newton_dodson@blue.com","3530 1113 3330 0000","603-555-9296","515-555-8779","UM",NULL
+"1745","Mac W Barchi","GREENHILL LANEDERBYSHIRE","77016-2723",59.0,"F","married","farmer",35.0,"YES","NO","YES","NO",25346.00,101745,"SAVINGS","barchi@yahoo.com","5580977968891503","804-555-1448",NULL,"TR","512-555-1957"
+"1746","Rosmira V Barrett","---------- BEDLINGTON NORTHUMBERLAND ENGLAND","33687-6579",74.0,"M","married","pensioner",26.0,"NO","NO","YES","YES",5516.37,101746,"SAVINGS","barrett@ibm.com","36111111111111","502-555-1908",NULL,"LY","404-555-1049"
+"1746","Rosmira V Barrett","---------- BEDLINGTON NORTHUMBERLAND ENGLAND","33687-6579",74.0,"M","married","pensioner",26.0,"NO","NO","YES","YES",14914.63,201746,"CHECKING","barrett@ibm.com","36111111111111","502-555-1908",NULL,"LY","404-555-1049"
+"1747","Georges R Donahue","Great BarrBirmingham","27983-7491",66.0,"F","widowed","pensioner",25.0,"YES","NO","NO","YES",4228.00,101747,"SAVINGS","georges_d@icloud.com","5169 7990 9185 4334","501-555-2606","303-555-2225","PG","609-555-5314"
+"1748","Morris G Alvares","Leabrook RoadWEST MIDLANDS","78003-4207",41.0,"M","single","employee",15.0,"NO","YES","NO","YES",61.00,101748,"SAVINGS","morris.alvares@gmx.net","213130951856200","615-555-2666",NULL,"CH","603-555-9049"
+"1749","Gayle D Shaner","Nottingham RoadBelper","85749-9359",61.0,"M","married","worker",20.0,"NO","NO","YES","YES",6542.91,101749,"SAVINGS","shaner@yahoo.com","36111111111111","505-555-6625",NULL,"MQ","404-555-7271"
+"1749","Gayle D Shaner","Nottingham RoadBelper","85749-9359",61.0,"M","married","worker",20.0,"NO","NO","YES","YES",17690.09,201749,"CHECKING","shaner@yahoo.com","36111111111111","505-555-6625",NULL,"MQ","404-555-7271"
+"1750","Delmer F Scronce","MinworthSutton Coldfield","85202-1044",25.0,"F","single","inactive",4.0,"NO","NO","NO","YES",1170.00,101750,"SAVINGS","delmer.scronce@icloud.com","30310111161029","401-555-3167","602-555-9384","NG","405-555-4835"
+"1751","Ronna N Risley","Parkeston QuayHarwich","45840-9780",20.0,"F","single","inactive",3.0,"NO","NO","NO","YES",4860.00,101751,"SAVINGS","ronna_r@icloud.com","5285696282092972","515-555-5377","503-555-3718","LK","860-555-4583"
+"1752","Rona G Schissel","ErdingtonBirmingham","59806-0327",45.0,"M","married","worker",20.0,"NO","YES","YES","YES",187.38,101752,"CHECKING","schissel@gmx.net","340000000000009","503-555-1645","517-555-1485","LY","307-555-6385"
+"1752","Rona G Schissel","ErdingtonBirmingham","59806-0327",45.0,"M","married","worker",20.0,"NO","YES","YES","YES",506.62,201752,"SAVINGS","schissel@gmx.net","340000000000009","503-555-1645","517-555-1485","LY","307-555-6385"
+"1753","Noreen A Rivero","Barkby RoadLeicester","32315",51.0,"M","married","employee",20.0,"NO","NO","YES","NO",37743.00,101753,"CHECKING","noreen_rivero@yahoo.com","30111111161229",NULL,NULL,"CY","303-555-8591"
+"1755","Loujenia H Agen","GrennockInverclyde","30353-0416",40.0,"M","married","employee",11.0,"NO","NO","YES","NO",391.50,101755,"CHECKING","loujenia_agen@blue.com","30310111161029","512-555-5037",NULL,"YE","loujenia_a@icloud.com"
+"1755","Loujenia H Agen","GrennockInverclyde","30353-0416",40.0,"M","married","employee",11.0,"NO","NO","YES","NO",1058.50,201755,"SAVINGS","loujenia_agen@blue.com","30310111161029","512-555-5037",NULL,"YE","loujenia_a@icloud.com"
+"1756","Suzette U Karamanian","Bellbrook ParkUckfield","32409-1695",71.0,"M","married","pensioner",32.0,"YES","NO","YES","NO",37303.00,101756,"CHECKING","suzettekaramanian@t-online.de","180030951856201","804-555-2992",NULL,"PY","401-555-3361"
+"1757","Zana Y Whitman","105 Devonshire Road 105 Devonshire Road London","77092-7028",65.0,"M","widowed","pensioner",26.0,"NO","NO","NO","YES",97837.00,101757,"CHECKING","zwhitman@ccdef.net",NULL,"907-555-8587",NULL,"BY","402-555-8072"
+"1758","Sal Y Sinkler","Hall GreenBirmingham","29180",74.0,"F","married","pensioner",24.0,"YES","NO","YES","NO",17977.95,101758,"CHECKING","sal.sinkler@ccdef.net",NULL,"307-555-4278",NULL,"GM","sal.sinkler@msn.com"
+"1758","Sal Y Sinkler","Hall GreenBirmingham","29180",74.0,"F","married","pensioner",24.0,"YES","NO","YES","NO",48607.04,201758,"SAVINGS","sal.sinkler@ccdef.net",NULL,"307-555-4278",NULL,"GM","sal.sinkler@msn.com"
+"1759","Tibor H Evernham","Chandler`s FordEastleigh","75381-9060",70.0,"M","married","inactive",22.0,"YES","NO","YES","NO",12689.00,101759,"CHECKING","tevernham@yahoo.com","180030951856201","502-555-8135",NULL,"TK","tibor_e@cicn.gov"
+"1760","Cara T Hughes","Industrial Est.Witney","27263-3163",66.0,"F","married","inactive",26.0,"NO","NO","YES","NO",40240.00,101760,"CHECKING","cara_hughes@ccdef.net","180030951856201","701-555-1931","573-555-4239","EG","802-555-4282"
+"1761","Sammye N Munsch","----------NEWCASTLE UPON TYNE","29020-9557",44.0,"M","widowed","worker",18.0,"YES","NO","NO","NO",11303.01,101761,"CHECKING","munsch@gmx.net","5169 7990 9185 4334","401-555-3587",NULL,"KE","515-555-2339"
+"1761","Sammye N Munsch","----------NEWCASTLE UPON TYNE","29020-9557",44.0,"M","widowed","worker",18.0,"YES","NO","NO","NO",30559.98,201761,"SAVINGS","munsch@gmx.net","5169 7990 9185 4334","401-555-3587",NULL,"KE","515-555-2339"
+"1762","Kaye F Worrell","BanburyOxon","32304",57.0,"F","married","farmer",20.0,"YES","NO","NO","YES",21691.00,101762,"CHECKING","worrell@gmail.com","30510011111111","302-555-5988","515-555-46","LI","808-555-2342"
+"1763","Violetta H Manion","110 KEW GREEN / KEW RICHMONDSURREY","77546-4856",22.0,"M","single","inactive",5.0,"YES","NO","NO","YES",1483.00,101763,"CHECKING","violetta_manion@yahoo.com","4146 6643 9004 5458","785-555-7623","617-555-2797","SB","505-555-8632"
+"1764","Harris D Leahy","22 HANOVER SQUARELONDON","34689-2798",78.0,"F","widowed","inactive",12.0,"YES","NO","NO","NO",6053.13,101764,"CHECKING","leahy@t-online.de","38111111111119","804-555-5994",NULL,"CH","406-555-5543"
+"1764","Harris D Leahy","22 HANOVER SQUARELONDON","34689-2798",78.0,"F","widowed","inactive",12.0,"YES","NO","NO","NO",16365.86,201764,"SAVINGS","leahy@t-online.de","38111111111119","804-555-5994",NULL,"CH","406-555-5543"
+"1765","Lin B Needleman","RedditchWorcester","86339",74.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",36433.00,101765,"CHECKING","lin_n@gmx.net","3528-3095-1856-2063","501-555-6265",NULL,"PE","614-555-8338"
+"1766","Carlos S Kirts","Leabrook RoadWEST MIDLANDS","87176",36.0,"F","married","employee",5.0,"NO","NO","YES","YES",5782.00,101766,"CHECKING","carloskirts@icloud.com","36111111111111","717-555-6307",NULL,"AU","701-555-5337"
+"1767","Hudson Y Connors","Bishops StortfordHertfordshire","85202",60.0,"M","married","farmer",32.0,"NO","NO","NO","YES",2329.02,101767,"CHECKING","hconnors@msn.com","4024007121595481","401-555-5175",NULL,"SV","hudson_connors@ccdef.net"
+"1767","Hudson Y Connors","Bishops StortfordHertfordshire","85202",60.0,"M","married","farmer",32.0,"NO","NO","NO","YES",6296.98,201767,"SAVINGS","hconnors@msn.com","4024007121595481","401-555-5175",NULL,"SV","hudson_connors@ccdef.net"
+"1768","Roxie W Woolley","1 CHURCH ROWKENT","77080",53.0,"F","divorced","inactive",11.0,"YES","NO","YES","NO",16178.00,101768,"CHECKING","rwoolley@web.de","3400 000000 00009","717-555-8575","512-555-4065","CV","603-555-497"
+"1769","Bryon J Ghrist","AllingtonMaidstone","85206-3481",26.0,"F","married","intermediate professions",0.0,"NO","NO","YES","NO",9784.00,101769,"CHECKING","ghrist@de.ibm.com","5169-7990-9185-4334","505-555-1974",NULL,"BN","401-555-8962"
+"1770","Edith N Davis","COLNBROOKSLOUGH","77092-7028",23.0,"F","single","inactive",3.0,"NO","YES","NO","YES",329.94,101770,"CHECKING","edith_davis@aol.com","5520111111111121","609-555-4454",NULL,"CU","davis@ibm.com"
+"1770","Edith N Davis","COLNBROOKSLOUGH","77092-7028",23.0,"F","single","inactive",3.0,"NO","YES","NO","YES",892.06,201770,"SAVINGS","edith_davis@aol.com","5520111111111121","609-555-4454",NULL,"CU","davis@ibm.com"
+"1771","Jena-Lee G Diller","Holder RoadAldershot","74464-3603",63.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",28441.00,101771,"CHECKING","diller@t-online.de","30411111111111","651-555-3139",NULL,"AD","jena-lee_diller@icloud.com"
+"1772","Bonisace J Pacolt","BanburyOxon","25177",19.0,"F","single","inactive",1.0,"NO","NO","NO","YES",773.00,101772,"CHECKING","bonisace_p@ccdef.net","5462522444922689","404-555-2439","775-555-9452","WF","603-555-4181"
+"1773","Darrell N Thronson","FACTORY ROADUPTON - POOLE - DORS","27606-3386",71.0,"F","married","inactive",34.0,"YES","NO","YES","NO",3784.59,101773,"CHECKING","dthronson@yahoo.com","38111111111119","334-555-4224",NULL,"AE","225-555-300"
+"1773","Darrell N Thronson","FACTORY ROADUPTON - POOLE - DORS","27606-3386",71.0,"F","married","inactive",34.0,"YES","NO","YES","NO",10232.41,201773,"SAVINGS","dthronson@yahoo.com","38111111111119","334-555-4224",NULL,"AE","225-555-300"
+"1774","Lavern C Heim","224 Marsh HillBirmingham","27563",67.0,"M","married","farmer",26.0,"YES","NO","YES","NO",11253.00,101774,"CHECKING","lavern_h@yahoo.com","5580977968891503","602-555-4021","360-555-1578","GT","lheim@msl.org"
+"1775","Gennadi P Donk","CLITTAFORD RD SOUTHWAYPLYMOUTH","96540-2200",71.0,"F","married","pensioner",35.0,"YES","NO","YES","NO",41714.00,101775,"CHECKING","gdonk@yahoo.com","3528309518562063","802-555-38","904-555-4072","BY","405-555-9879"
+"1776","Mason X Agyurre","Berkswell RoadMeriden","75220-2314",73.0,"M","married","pensioner",35.0,"YES","NO","YES","YES",11262.78,101776,"CHECKING","agyurre@web.de","30510011111111","802-555-4022",NULL,"IN","401-555-4873"
+"1776","Mason X Agyurre","Berkswell RoadMeriden","75220-2314",73.0,"M","married","pensioner",35.0,"YES","NO","YES","YES",30451.21,201776,"SAVINGS","agyurre@web.de","30510011111111","802-555-4022",NULL,"IN","401-555-4873"
+"1777","Maithreyi V Dobson","2 Cornwall StreetBirmingham","30083",72.0,"F","widowed","pensioner",21.0,"YES","NO","NO","YES",69224.00,101777,"SAVINGS","maithreyidobson@msl.org","4146 6643 9004 5458","651-555-8876","609-555-7327","FX","405-555-7739"
+"1778","Alice C Ferri","----------NEWCASTLE UPON TYNE","32611-5900",68.0,"M","married","pensioner",40.0,"YES","NO","YES","NO",36777.00,101778,"SAVINGS","alice_ferri@de.ibm.com","180030951856201","615-555-8569","603-555-4422","CN",NULL
+"1779","Joann C Crocker","GASGOIGNE ROADESSEX","32347-9805",63.0,"F","married","inactive",3.0,"NO","NO","NO","NO",10398.24,101779,"SAVINGS","joann_crocker@gmx.net","5169799091854334","804-555-7230","401-555-4915","DZ","573-555-8633"
+"1779","Joann C Crocker","GASGOIGNE ROADESSEX","32347-9805",63.0,"F","married","inactive",3.0,"NO","NO","NO","NO",28113.76,201779,"CHECKING","joann_crocker@gmx.net","5169799091854334","804-555-7230","401-555-4915","DZ","573-555-8633"
+"1780","Sarah E Foster","WEST WORLD WESTGATELONDON","79936-5916",86.0,"M","married","pensioner",19.0,"YES","NO","YES","NO",7722.00,101780,"SAVINGS","sarah_f@t-online.de","3528-3095-1856-2063","605-555-6092",NULL,"MC","405-555-8931"
+"1781","Enrique B Rudlang","56 Springfield RoadBirmingham","66201",40.0,"F","married","executives,self-employed",0.0,"NO","NO","YES","YES",218.00,101781,"SAVINGS","erudlang@cicn.gov","5169 7990 9185 4334","808-555-1151",NULL,"PG","603-555-7636"
+"1782","Hyong E Ruddell","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","30306-2329",25.0,"M","single","inactive",7.0,"NO","YES","NO","YES",267.30,101782,"SAVINGS","hruddell@cicn.gov","30310111161029","515-555-89",NULL,"ET","614-555-6871"
+"1782","Hyong E Ruddell","36 GRAVELLY INDUSTRIAL PARKBIRMINGHAM","30306-2329",25.0,"M","single","inactive",7.0,"NO","YES","NO","YES",722.69,201782,"CHECKING","hruddell@cicn.gov","30310111161029","515-555-89",NULL,"ET","614-555-6871"
+"1783","Marlin M Greenia","Spondon Spondon Derby","77092-7028",38.0,"M","cohabitant","worker",0.0,"NO","NO","YES","NO",52.00,101783,"SAVINGS","mgreenia@yahoo.com","340000000000009","302-555-5381",NULL,"NF",NULL
+"1784","Amos G Czosnyka","WORLE INDUSTRIAL CENTREAVON","28110",56.0,"F","married","craftsmen, storekeepers",4.0,"NO","YES","NO","YES",-2976.00,101784,"SAVINGS",NULL,"3400-000000-00009","573-555-1373","517-555-930","EE",NULL
+"1785","Lynnette S Goldsberry","Aston Triangle Aston Triangle Birmingham","No ZIP",75.0,"F","widowed","inactive",31.0,"YES","NO","NO","NO",3308.31,101785,"SAVINGS","lynnette_goldsberry@msn.com","5285696282092972","502-555-7266",NULL,"CY","334-555-3426"
+"1785","Lynnette S Goldsberry","Aston Triangle Aston Triangle Birmingham","No ZIP",75.0,"F","widowed","inactive",31.0,"YES","NO","NO","NO",8944.69,201785,"CHECKING","lynnette_goldsberry@msn.com","5285696282092972","502-555-7266",NULL,"CY","334-555-3426"
+"1786","Garland E Kimmel","WimborneDorset","90030-0457",40.0,"F","married","craftsmen, storekeepers",22.0,"NO","NO","YES","NO",3423.00,101786,"SAVINGS","garland_k@yahoo.com","340000000000009","208-555-2404",NULL,"RU","775-555-7605"
+"1787","Gilbert H Corona","Clayton Road ATTN AMY WATSONCP651 Hayes","85016-7929",73.0,"F","widowed","inactive",21.0,"YES","NO","NO","YES",15718.00,101787,"SAVINGS","gcorona@web.de","5169799091854334","401-555-7320","317-555-2725","US","gilbert_c@de.ibm.com"
+"1788","Clifford F Holsinger","37 MADDOX STREETLONDON","79821",66.0,"M","married","pensioner",24.0,"YES","NO","YES","YES",16726.77,101788,"SAVINGS","clifford_holsinger@msl.org","30210111161229","385-555-3793","603-555-1143","PK","clifford.holsinger@blue.com"
+"1788","Clifford F Holsinger","37 MADDOX STREETLONDON","79821",66.0,"M","married","pensioner",24.0,"YES","NO","YES","YES",45224.22,201788,"CHECKING","clifford_holsinger@msl.org","30210111161229","385-555-3793","603-555-1143","PK","clifford.holsinger@blue.com"
+"1789","Avery K Weidman","Beetons Way  Burv St  EdmondsSUFFOLK","66215",67.0,"M","married","pensioner",26.0,"YES","NO","YES","YES",21421.00,101789,"SAVINGS","weidman@ibm.com","5169 7990 9185 4334","843-555-8414",NULL,"FR","502-555-7286"
+"1790","Fannie M Graves","GarstangPreston","99801-1700",22.0,"F","single","inactive",2.0,"NO","YES","NO","YES",676.00,101790,"CHECKING","fgraves@cicn.gov","4146 6643 9004 5458","501-555-6066",NULL,"TN","fanniegraves@t-online.de"
+"1791","Ole D Balboni","SmallfieldHorley","34109-6228",68.0,"F","married","inactive",16.0,"NO","NO","YES","NO",729.27,101791,"CHECKING","ole.balboni@web.de","30111111161229","860-555-7139",NULL,"DK","615-555-2286"
+"1791","Ole D Balboni","SmallfieldHorley","34109-6228",68.0,"F","married","inactive",16.0,"NO","NO","YES","NO",1971.73,201791,"SAVINGS","ole.balboni@web.de","30111111161229","860-555-7139",NULL,"DK","615-555-2286"
+"1792","Hamid X Doorley","257 Great Lister StreetBirmingham","78744-1147",80.0,"F","widowed","pensioner",39.0,"YES","NO","NO","YES",20049.00,101792,"CHECKING","hamid_doorley@cicn.gov","5169799091854334","402-555-2325","573-555-3890","PF","334-555-6627"
+"1793","Gwen L Russell","ALFRETON ROADDERBY","30907-2998",76.0,"M","married","pensioner",26.0,"NO","NO","NO","YES",44929.00,101793,"CHECKING","gwen_r@cicn.gov","4146664390045458","512-555-7803","717-555-6785","MD","503-555-704"
+"1794","Loyd G Westerberg","Great Western WaySwindon","79821",26.0,"M","single","inactive",8.0,"NO","NO","NO","YES",1375.11,101794,"CHECKING","loyd_w@de.ibm.com","3530 1113 3330 0000","302-555-6201",NULL,"MQ","401-555-8803"
+"1794","Loyd G Westerberg","Great Western WaySwindon","79821",26.0,"M","single","inactive",8.0,"NO","NO","NO","YES",3717.89,201794,"SAVINGS","loyd_w@de.ibm.com","3530 1113 3330 0000","302-555-6201",NULL,"MQ","401-555-8803"
+"1795","Armond I Demoss","Edinburgh WayHarlow","78934-4946",73.0,"M","married","pensioner",25.0,"YES","NO","YES","YES",24634.00,101795,"CHECKING","ademoss@yahoo.com","30411111111111","302-555-5782",NULL,"FJ","ademoss@ibm.com"
+"1796","Jacquelynn I Haggard","90 SUMMER LANEBIRMINGHAM","28236",27.0,"F","single","intermediate professions",18.0,"NO","NO","NO","YES",58654.00,101796,"CHECKING","jacquelynnhaggard@yahoo.com","6520224090045455","615-555-3377",NULL,"MZ","808-555-4278"
+"1797","Suzanne G King","CROMWELL ROADCAMBS.","No ZIP",70.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",15940.53,101797,"CHECKING","suzanne_king@ccdef.net","5285696282092972","517-555-5567","208-555-9082","IN","919-555-8181"
+"1797","Suzanne G King","CROMWELL ROADCAMBS.","No ZIP",70.0,"F","widowed","pensioner",18.0,"YES","NO","NO","NO",43098.47,201797,"SAVINGS","suzanne_king@ccdef.net","5285696282092972","517-555-5567","208-555-9082","IN","919-555-8181"
+"1798","Patry Q Yates","Little Aston RoadAldridge","77327-9333",64.0,"F","married","pensioner",22.0,"NO","NO","YES","NO",21549.00,101798,"SAVINGS","patry_yates@cicn.gov","36111111111111","907-555-818","502-555-7637","RE","207-555-9323"
+"1799","Nicole U Christlieb","257 Great Lister StreetBirmingham","99224-8480",63.0,"F","married","inactive",35.0,"YES","NO","YES","NO",39763.00,101799,"SAVINGS","nchristlieb@icloud.com","5580977968891503","775-555-4173","907-555-8754","TR","402-555-172"
+"1800","Calvo S Buchanan","FARNCOMBE ROADWEST SUSSEX","34684-1233",54.0,"M","single","intermediate professions",29.0,"NO","NO","NO","YES",727.11,101800,"SAVINGS","buchanan@icloud.com","5580977968891503","916-555-71",NULL,"CO","calvo_buchanan@icloud.com"
+"1800","Calvo S Buchanan","FARNCOMBE ROADWEST SUSSEX","34684-1233",54.0,"M","single","intermediate professions",29.0,"NO","NO","NO","YES",1965.88,201800,"CHECKING","buchanan@icloud.com","5580977968891503","916-555-71",NULL,"CO","calvo_buchanan@icloud.com"
+"1801","Shanna K Ajamie","MinworthSutton Coldfield","30052",32.0,"M","married","intermediate professions",10.0,"NO","NO","YES","NO",-1457.00,101801,"SAVINGS","ajamie@cicn.gov","3400-000000-00009","517-555-3281","573-555-3410","GU",NULL
+"1802","Joan Z Foley","StirchleyBirmingham","85233",24.0,"M","single","inactive",4.0,"NO","YES","NO","YES",3420.00,101802,"SAVINGS","foley@aol.com","5580977968891503","573-555-9245","207-555-2396","AM","907-555-7734"
+"1803","Pierre V Payne","6/8 HIGH STREETCHESHIRE","33831-0928",80.0,"M","married","pensioner",45.0,"NO","NO","YES","YES",4879.71,101803,"SAVINGS","ppayne@web.de","3400-000000-00009","601-555-3096",NULL,"NA","pierre_payne@msn.com"
+"1803","Pierre V Payne","6/8 HIGH STREETCHESHIRE","33831-0928",80.0,"M","married","pensioner",45.0,"NO","NO","YES","YES",13193.28,201803,"CHECKING","ppayne@web.de","3400-000000-00009","601-555-3096",NULL,"NA","pierre_payne@msn.com"
+"1804","Wes A Gardeline","SIR FRANCIS LEY IND. PARKDERBY","90640",82.0,"F","widowed","inactive",43.0,"YES","NO","YES","YES",25970.00,101804,"SAVINGS","wes_gardeline@ibm.com","378282246310005","717-555-6760","512-555-8063","SZ","614-555-7736"
+"1805","Lynn H Tanaka","WittonBIRMINGHAM","88268",58.0,"M","married","farmer",26.0,"YES","NO","YES","NO",99998.00,101805,"SAVINGS","lynn_t@msl.org","5423111111111111","785-555-1531","785-555-6794","CR","808-555-4238"
+"1806","Regean N Kaupp","AbergavennyGwent","38478-4711",78.0,"F","widowed","pensioner",31.0,"YES","NO","NO","NO",8233.92,101806,"SAVINGS","regean_k@yahoo.com","5169-7990-9185-4334","401-555-2","775-555-8523","GH","916-555-7997"
+"1806","Regean N Kaupp","AbergavennyGwent","38478-4711",78.0,"F","widowed","pensioner",31.0,"YES","NO","NO","NO",22262.07,201806,"CHECKING","regean_k@yahoo.com","5169-7990-9185-4334","401-555-2","775-555-8523","GH","916-555-7997"
+"1807","Petr K Bell","Frederick RoadBirmingham","34747",72.0,"F","married","inactive",30.0,"YES","NO","YES","NO",5257.00,101807,"CHECKING","petr_bell@ibm.com","6220264390045758","614-555-3116","517-555-3626","VU","petr.bell@ccdef.net"
+"1808","Collis I West","HockleyBirmingham","98270",82.0,"F","divorced","inactive",30.0,"YES","NO","YES","YES",5257.00,101808,"CHECKING","colliswest@blue.com","38111111111119","502-555-869","907-555-8700","QA","651-555-3923"
+"1809","Liam X Newsom","Rockingham RoadLeicester","77474-9337",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","NO",5188.59,101809,"CHECKING","liam_n@ccdef.net","5285696282092972","417-555-1255","406-555-6683","UZ","liamnewsom@web.de"
+"1809","Liam X Newsom","Rockingham RoadLeicester","77474-9337",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","NO",14028.41,201809,"SAVINGS","liam_n@ccdef.net","5285696282092972","417-555-1255","406-555-6683","UZ","liamnewsom@web.de"
+"1810","Marge J Dechenne","Rockingham RoadLeicester","33026-3969",40.0,"F","married","employee",19.0,"NO","YES","YES","NO",35410.00,101810,"CHECKING","mdechenne@gmx.net","5520111111111121","651-555-910","401-555-5320","GF","573-555-8148"
+"1811","Gerry W Knipp","Bishops StortfordHertfordshire","32611",85.0,"F","widowed","inactive",12.0,"NO","NO","NO","NO",10041.00,101811,"CHECKING","gerry_k@web.de","3400-000000-00009","517-555-9367","609-555-8923","UZ","317-555-8895"
+"1812","Cory R Oas","CLARKE STREETDERBY","92121-2990",77.0,"F","married","inactive",19.0,"YES","NO","YES","YES",8557.11,101812,"CHECKING","oas@t-online.de","30310111161029","515-555-8251",NULL,"CA","608-555-5253"
+"1812","Cory R Oas","CLARKE STREETDERBY","92121-2990",77.0,"F","married","inactive",19.0,"YES","NO","YES","YES",23135.89,201812,"SAVINGS","oas@t-online.de","30310111161029","515-555-8251",NULL,"CA","608-555-5253"
+"1813","Theresia J Lian","----------CHICHESTER WEST SUSS","29602",20.0,"F","single","inactive",5.0,"NO","NO","NO","YES",1108.00,101813,"CHECKING","tlian@msl.org","5462522444922689","404-555-1195","402-555-3704","UA","402-555-8027"
+"1814","Ronald B Mcguinness","HorsforthLeeds","27629",73.0,"F","married","inactive",13.0,"YES","NO","YES","NO",57871.00,101814,"CHECKING","ronaldmcguinness@blue.com","213130951856200","405-555-8386","843-555-165","MS","302-555-162"
+"1815","Shaaban F Vasavada","Park Farm Industrial EstateWellingborough","33332",72.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",3863.16,101815,"CHECKING","shaaban.vasavada@web.de","4024 0071 2159 5481","701-555-4841","802-555-831","MR","shaaban.vasavada@gmx.net"
+"1815","Shaaban F Vasavada","Park Farm Industrial EstateWellingborough","33332",72.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",10444.84,201815,"SAVINGS","shaaban.vasavada@web.de","4024 0071 2159 5481","701-555-4841","802-555-831","MR","shaaban.vasavada@gmx.net"
+"1816","Cleve D Bridwell","Western Industrial EstateCaerphilly","28405-4161",60.0,"F","married","farmer",23.0,"YES","NO","YES","YES",28111.00,101816,"CHECKING","cleve.bridwell@t-online.de","6011567891012132","843-555-6576",NULL,"KN","cleve_b@msl.org"
+"1817","Karen N Lien","DARTMOUTH ROADWEST MIDLANDS","86335-0922",75.0,"F","widowed","pensioner",22.0,"NO","NO","NO","YES",294296.00,101817,"CHECKING","klien@msn.com","30011111111119","404-555-1820",NULL,"CR","609-555-7079"
+"1818","Felip P Mcdavid","Parkeston QuayHarwich","32407",77.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",7055.64,101818,"CHECKING","felip_mcdavid@msn.com","5423111111111111","601-555-1738",NULL,"VC","775-555-2294"
+"1818","Felip P Mcdavid","Parkeston QuayHarwich","32407",77.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",19076.36,201818,"SAVINGS","felip_mcdavid@msn.com","5423111111111111","601-555-1738",NULL,"VC","775-555-2294"
+"1819","Conley O Wright","Barton BlountChurch Broughton","98506-2857",75.0,"F","married","inactive",1.0,"YES","NO","YES","NO",11008.00,101819,"CHECKING","conley_w@gmx.net","5169 7990 9185 4334","307-555-8162",NULL,"DO","conley.wright@ccdef.net"
+"1820","Willy S Besade","3-5 SWALLOW PLACELondon","87505",76.0,"F","married","inactive",5.0,"NO","NO","NO","YES",47855.00,101820,"CHECKING","willy_besade@gmx.net",NULL,"843-555-4383",NULL,"CF","512-555-8361"
+"1821","Ferrell Q Downey","Perry BarrBirmingham","36203",21.0,"M","single","inactive",4.0,"NO","YES","NO","YES",800.55,101821,"SAVINGS","ferrell_d@t-online.de","30210111161229","803-555-9452",NULL,"CK","775-555-7721"
+"1821","Ferrell Q Downey","Perry BarrBirmingham","36203",21.0,"M","single","inactive",4.0,"NO","YES","NO","YES",2164.45,201821,"CHECKING","ferrell_d@t-online.de","30210111161229","803-555-9452",NULL,"CK","775-555-7721"
+"1822","Spencer T Lytle","Node CourtCodicote","77340-6499",67.0,"F","widowed","inactive",4.0,"NO","NO","NO","YES",26761.00,101822,"SAVINGS","lytle@yahoo.com","4146-6643-9004-5458","512-555-1706",NULL,"SZ","608-555-6379"
+"1823","Wyman V Reichelman","NorthgateAldridge","27615",60.0,"F","widowed","worker",17.0,"NO","NO","NO","NO",16834.00,101823,"SAVINGS","wyman_r@blue.com","30011111111119","515-555-4401","602-555-7551","IS","505-555-4363"
+"1824","Danese P Rose","Industrial Est.Witney","34744",58.0,"F","married","farmer",28.0,"YES","NO","YES","NO",11704.23,101824,"SAVINGS","drose@de.ibm.com","5169799091854334","401-555-1006","515-555-7245","PA","385-555-6435"
+"1824","Danese P Rose","Industrial Est.Witney","34744",58.0,"F","married","farmer",28.0,"YES","NO","YES","NO",31644.77,201824,"CHECKING","drose@de.ibm.com","5169799091854334","401-555-1006","515-555-7245","PA","385-555-6435"
+"1825","Gaylord P Huot","105 Devonshire RoadLondon","43213",54.0,"M","married","worker",30.0,"NO","NO","YES","YES",1575.00,101825,"CHECKING","gaylord_huot@gmx.net","4024007121595481","573-555-4055","402-555-7935","YT","406-555-7744"
+"1826","Reuben A Ramos","Canal RoadLeeds","85011",24.0,"M","single","executives,self-employed",4.0,"NO","YES","NO","YES",2436.00,101826,"CHECKING","reuben_r@web.de","3528-3095-1856-2063","417-555-329","208-555-4679","SZ",NULL
+"1827","Earl Z Bryant","173 Friar Street 173 Friar Street Reading","86405-3356",41.0,"M","married","worker",22.0,"NO","NO","NO","NO",774.36,101827,"CHECKING","earl_bryant@aol.com","6220264390045758","401-555-2381",NULL,"VC","775-555-3345"
+"1827","Earl Z Bryant","173 Friar Street 173 Friar Street Reading","86405-3356",41.0,"M","married","worker",22.0,"NO","NO","NO","NO",2093.64,201827,"SAVINGS","earl_bryant@aol.com","6220264390045758","401-555-2381",NULL,"VC","775-555-3345"
+"1828","Tory H Turley","AIRPORT WAY LUTONBEDFORDSHIRE LU2 9NI","86322-4241",40.0,"F","married","employee",15.0,"NO","YES","YES","NO",1761.00,101828,"CHECKING","turley@aol.com","5285696282092972","518-555-9977",NULL,"PW","tturley@gmail.com"
+"1829","Jamie L Fink","North LaneAldershot","87500",44.0,"F","married","farmer",23.0,"NO","NO","YES","NO",18701.00,101829,"CHECKING","jamie_f@aol.com",NULL,"317-555-876",NULL,"NP","501-555-770"
+"1830","Ivars T Rozsa","60 FREDERICK STREETBIRMINGHAM","34741",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","YES",3200.85,101830,"SAVINGS","rozsa@blue.com","6011567891012132","202-555-3047",NULL,"BR","irozsa@msl.org"
+"1830","Ivars T Rozsa","60 FREDERICK STREETBIRMINGHAM","34741",85.0,"F","widowed","pensioner",23.0,"NO","NO","NO","YES",8654.15,201830,"CHECKING","rozsa@blue.com","6011567891012132","202-555-3047",NULL,"BR","irozsa@msl.org"
+"1831","Tiffany J Pierson","UNIT 33 IMEX BUSINESS PARKBIRMINGHAM","27604-3754",77.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",65400.00,101831,"SAVINGS","tiffany_p@cicn.gov","3400-000000-00009","501-555-7786",NULL,"PW","907-555-7"
+"1832","Cindi Q Burns","178/188 Great South West RoadHounslow","30013",73.0,"F","married","inactive",26.0,"YES","NO","YES","NO",4534.00,101832,"SAVINGS","burns@msl.org","38111111111119","615-555-1913","207-555-422","AU","burns@web.de"
+"1833","Jurij V Dreckman","3-5 SWALLOW PLACELondon","75212-4217",71.0,"F","married","inactive",1.0,"NO","NO","YES","NO",7915.59,101833,"SAVINGS","jdreckman@gmx.net","36111111111111","207-555-784",NULL,"SA","573-555-4772"
+"1833","Jurij V Dreckman","3-5 SWALLOW PLACELondon","75212-4217",71.0,"F","married","inactive",1.0,"NO","NO","YES","NO",21401.41,201833,"CHECKING","jdreckman@gmx.net","36111111111111","207-555-784",NULL,"SA","573-555-4772"
+"1834","Gail C Meredith","Lower EarlyReading","33637-6744",59.0,"F","married","intermediate professions",25.0,"NO","NO","NO","NO",-326.00,101834,"CHECKING","gmeredith@msl.org","5169-7990-9185-4334","517-555-594",NULL,"AQ","meredith@msl.org"
+"1835","Brantley C Araica","18-20 CROFTSBANK ROADMANCHESTER","77041-5361",38.0,"M","married","worker",20.0,"NO","YES","YES","NO",2073.00,101835,"CHECKING","brantley.araica@gmx.net","6520224090045455","302-555-9590","808-555-9604","MO","850-555-5761"
+"1836","Mamie L Matteson","Balliol Business Park East Balliol Business Park East Newcastle upon Tyne","33458",36.0,"F","single","employee",18.0,"NO","YES","NO","YES",110.43,101836,"CHECKING","matteson@aol.com","3528309518562063","302-555-4354",NULL,"LV","517-555-7991"
+"1836","Mamie L Matteson","Balliol Business Park East Balliol Business Park East Newcastle upon Tyne","33458",36.0,"F","single","employee",18.0,"NO","YES","NO","YES",298.57,201836,"SAVINGS","matteson@aol.com","3528309518562063","302-555-4354",NULL,"LV","517-555-7991"
+"1837","Gerard P Wendoloski","55 London RoadSt Albans","30353-0416",44.0,"M","single","employee",23.0,"NO","NO","NO","YES",12281.00,101837,"CHECKING","gerard_w@ibm.com","6220264390045758","651-555-1558",NULL,"KR","gerard_w@msn.com"
+"1838","Chet N Lacount","SpekeLiverpool","86426",50.0,"F","married","farmer",20.0,"NO","YES","YES","NO",7762.00,101838,"SAVINGS","clacount@yahoo.com","5520111111111121","505-555-7422","614-555-9032","JO","307-555-8357"
+"1839","Will Z Christensen","2 Purley WayCroydon","77039-3804",65.0,"M","married","pensioner",12.0,"NO","NO","YES","YES",3789.45,101839,"SAVINGS","willchristensen@web.de","4146664390045458","208-555-308",NULL,"RO","christensen@ccdef.net"
+"1839","Will Z Christensen","2 Purley WayCroydon","77039-3804",65.0,"M","married","pensioner",12.0,"NO","NO","YES","YES",10245.55,201839,"CHECKING","willchristensen@web.de","4146664390045458","208-555-308",NULL,"RO","christensen@ccdef.net"
+"1840","Nels F Le","SpekeLiverpool","32215",26.0,"M","single","inactive",11.0,"NO","NO","NO","YES",20476.00,101840,"SAVINGS","le@msl.org","30411111111111","602-555-7187","804-555-3101","GA","nle@blue.com"
+"1841","Linwood R Nicholes","DissNorfolk","28166-0485",39.0,"M","cohabitant","intermediate professions",15.0,"NO","NO","YES","NO",27208.00,101841,"SAVINGS","nicholes@t-online.de","30411111111111","518-555-5172","207-555-9118","GB","linwood_n@msn.com"
+"1842","Nathaniel O Arcadia","----------Kingswinford","90060-0036",80.0,"M","married","pensioner",42.0,"YES","NO","YES","NO",1732.59,101842,"CHECKING","nathaniel_a@t-online.de","213130951856200","603-555-499",NULL,"IO","207-555-176"
+"1842","Nathaniel O Arcadia","----------Kingswinford","90060-0036",80.0,"M","married","pensioner",42.0,"YES","NO","YES","NO",4684.41,201842,"SAVINGS","nathaniel_a@t-online.de","213130951856200","603-555-499",NULL,"IO","207-555-176"
+"1843","Marjosa T Antunes","STAND PARK / SHEFFIELD ROADCHESTERFIELD DERBY","77342-0099",47.0,"M","married","worker",25.0,"NO","NO","YES","NO",19627.00,101843,"CHECKING","marjosa_a@msn.com","30310111161029","303-555-1973",NULL,"CX","208-555-2700"
+"1844","Duncan Q Muldoon","120 VYSE STREETBIRMINGHAM","85011",79.0,"M","married","pensioner",48.0,"YES","NO","YES","NO",21710.00,101844,"CHECKING","duncan_m@t-online.de","3528-3095-1856-2063","802-555-9198",NULL,"FX","dmuldoon@t-online.de"
+"1845","Christian P Finnin","TamworthStaffs","75212-4217",62.0,"F","married","inactive",17.0,"NO","NO","YES","NO",3553.20,101845,"CHECKING","christianfinnin@icloud.com","38111111111119","503-555-6831","303-555-9346","KE","615-555-2147"
+"1845","Christian P Finnin","TamworthStaffs","75212-4217",62.0,"F","married","inactive",17.0,"NO","NO","YES","NO",9606.80,201845,"SAVINGS","christianfinnin@icloud.com","38111111111119","503-555-6831","303-555-9346","KE","615-555-2147"
+"1846","Sammye O Krants","Foundry CloseHorsham","32809-5500",61.0,"F","divorced","inactive",1.0,"NO","NO","NO","YES",11046.00,101846,"CHECKING",NULL,"4146 6643 9004 5458","775-555-8745","401-555-4134","AW","808-555-1584"
+"1847","Belinda X Christman","Tame RoadBirmingham","34266",37.0,"F","cohabitant","employee",3.0,"NO","NO","YES","YES",3802.00,101847,"SAVINGS","belindachristman@msn.com","30510011111111",NULL,NULL,"MX","916-555-4373"
+"1848","Christie M Artus","DroitwichWorcester","77041-5361",25.0,"M","single","worker",5.0,"NO","YES","NO","YES",285.12,101848,"SAVINGS","christie.artus@de.ibm.com","6220264390045758","701-555-9430","843-555-9674","BI","334-555-4440"
+"1848","Christie M Artus","DroitwichWorcester","77041-5361",25.0,"M","single","worker",5.0,"NO","YES","NO","YES",770.88,201848,"CHECKING","christie.artus@de.ibm.com","6220264390045758","701-555-9430","843-555-9674","BI","334-555-4440"
+"1849","Melinda W Aboytez","SMETHWICK WARLEYWEST MIDLANDS","77093-1894",39.0,"M","married","farmer",20.0,"NO","NO","YES","NO",1377.00,101849,"SAVINGS","melindaaboytez@de.ibm.com","30510011111111","501-555-9781",NULL,"EE","melinda_aboytez@ccdef.net"
+"1850","Rino H Lepper","Breeds PlaceHastings","85085",36.0,"F","single","inactive",17.0,"NO","NO","NO","NO",11125.00,101850,"SAVINGS","lepper@cicn.gov","4146664390045458","605-555-9774",NULL,"CU","512-555-3111"
+"1851","Margret H Anna","WittonBirmingham","32792-2237",19.0,"M","single","employee",3.0,"NO","YES","NO","YES",427.41,101851,"SAVINGS","anna@yahoo.com","4146664390045458","512-555-5064",NULL,"HN",NULL
+"1851","Margret H Anna","WittonBirmingham","32792-2237",19.0,"M","single","employee",3.0,"NO","YES","NO","YES",1155.59,201851,"CHECKING","anna@yahoo.com","4146664390045458","512-555-5064",NULL,"HN",NULL
+"1852","Cesarea C Cho","Trafalgar WayCamberley","28001",71.0,"F","widowed","pensioner",8.0,"NO","NO","NO","YES",1829.00,101852,"CHECKING","cho@web.de","30210111161229","502-555-3128",NULL,"SH","cho@cicn.gov"
+"1853","Michel B Spiegel","DissNorfolk","76548",75.0,"F","married","pensioner",31.0,"YES","NO","YES","NO",36470.00,101853,"CHECKING","mspiegel@web.de","38111111111119","406-555-4137",NULL,"SN","919-555-7331"
+"1854","Mitchell V Redding","Castletown WaySunderland","87015-9739",68.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",4644.27,101854,"CHECKING","redding@t-online.de","5169-7990-9185-4334","803-555-2934","503-555-8790","SL","803-555-8894"
+"1854","Mitchell V Redding","Castletown WaySunderland","87015-9739",68.0,"F","married","pensioner",26.0,"YES","NO","YES","NO",12556.73,201854,"SAVINGS","redding@t-online.de","5169-7990-9185-4334","803-555-2934","503-555-8790","SL","803-555-8894"
+"1855","Linden D Soforenko","ABERCRAVE CAERBONTSWANSEA","27419",50.0,"F","married","craftsmen, storekeepers",18.0,"NO","NO","YES","YES",5831.00,101855,"CHECKING","lsoforenko@msn.com","4146664390045458","573-555-1121","608-555-3147","RW","406-555-7393"
+"1856","Winnie K Baltzell","----------SWANSEA","29652",90.0,"M","widowed","pensioner",12.0,"NO","NO","YES","NO",63877.00,101856,"CHECKING","wbaltzell@icloud.com",NULL,"307-555-4208","518-555-3173","TK","winniebaltzell@msn.com"
+"1857","Carmel U Desmet","Little Aston RoadAldridge","28235",69.0,"M","married","pensioner",30.0,"NO","NO","YES","NO",19058.76,101857,"CHECKING","desmet@de.ibm.com","36111111111111","775-555-9962","803-555-7315","BO","517-555-7874"
+"1857","Carmel U Desmet","Little Aston RoadAldridge","28235",69.0,"M","married","pensioner",30.0,"NO","NO","YES","NO",51529.24,201857,"SAVINGS","desmet@de.ibm.com","36111111111111","775-555-9962","803-555-7315","BO","517-555-7874"
+"1858","Gisela W Brooks","ErdingtonBirmingham","59414-0528",50.0,"M","married","farmer",28.0,"YES","NO","YES","YES",35164.00,101858,"SAVINGS","brooks@web.de","4024 0071 2159 5481","602-555-8229",NULL,"MV","gisela_brooks@icloud.com"
+"1859","Lon Q Geoghegan","224 Marsh HillBirmingham","32809-5500",23.0,"M","single","inactive",5.0,"NO","YES","NO","YES",131.00,101859,"SAVINGS","lgeoghegan@msn.com","30411111111111","302-555-9840",NULL,"VA","615-555-9074"
+"1860","Jurij Q Shirley","Blucher StreetBirmingham","77305-1135",38.0,"M","single","intermediate professions",23.0,"NO","NO","NO","YES",7656.12,101860,"SAVINGS","jshirley@t-online.de","340000000000009","404-555-5591",NULL,"VU","617-555-7089"
+"1860","Jurij Q Shirley","Blucher StreetBirmingham","77305-1135",38.0,"M","single","intermediate professions",23.0,"NO","NO","NO","YES",20699.88,201860,"CHECKING","jshirley@t-online.de","340000000000009","404-555-5591",NULL,"VU","617-555-7089"
+"1861","Chelle E Garner","Sherbourne DriveTilbrook Milton Keyn","85011",46.0,"F","married","employee",23.0,"NO","NO","YES","NO",111.00,101861,"SAVINGS","chelle_garner@gmx.net","213130951856200","617-555-1784",NULL,"LA","512-555-750"
+"1862","Nina S Branham","6/8 HIGH STREETCHESHIRE","75185",47.0,"M","married","worker",23.0,"NO","YES","YES","YES",1044.00,101862,"CHECKING","nina.branham@msn.com",NULL,"608-555-8439","417-555-2229","HU","385-555-9611"
+"1863","Greta K Segura","Central Trading EstateStaines","30060-2358",51.0,"F","married","intermediate professions",9.0,"NO","YES","NO","NO",2263.68,101863,"CHECKING","greta_s@de.ibm.com","5169-7990-9185-4334","904-555-8628",NULL,"SH","greta_segura@msn.com"
+"1863","Greta K Segura","Central Trading EstateStaines","30060-2358",51.0,"F","married","intermediate professions",9.0,"NO","YES","NO","NO",6120.32,201863,"SAVINGS","greta_s@de.ibm.com","5169-7990-9185-4334","904-555-8628",NULL,"SH","greta_segura@msn.com"
+"1864","Sonia W Mcginnis","---------- RETURN TO LYNN CLAYPOOL HOUSTON","77042-6700",71.0,"F","widowed","inactive",18.0,"NO","NO","NO","YES",20231.00,101864,"CHECKING","mcginnis@t-online.de","4146664390045458","334-555-4316","401-555-1254","PN","sonia_mcginnis@gmx.net"
+"1865","Savannah Z Birk","----------Worthing","77041-5361",47.0,"M","single","farmer",4.0,"YES","NO","NO","NO",106796.00,101865,"CHECKING","savannah_birk@aol.com","5285696282092972","317-555-1103",NULL,"NP","225-555-2589"
+"1866","Staurt E Auger","Bellbrook ParkUckfield","29171-2239",62.0,"F","widowed","inactive",13.0,"NO","NO","NO","YES",9425.43,101866,"SAVINGS","auger@msl.org","3400-000000-00009","802-555-3649","307-555-5955","ES","406-555-21"
+"1866","Staurt E Auger","Bellbrook ParkUckfield","29171-2239",62.0,"F","widowed","inactive",13.0,"NO","NO","NO","YES",25483.57,201866,"CHECKING","auger@msl.org","3400-000000-00009","802-555-3649","307-555-5955","ES","406-555-21"
+"1867","Marcel E Hersch","10 Mordaunt RoadLondon","32839-2406",47.0,"M","married","intermediate professions",22.0,"NO","YES","YES","YES",4203.00,101867,"SAVINGS","marcel_h@web.de",NULL,"303-555-970",NULL,"FO","785-555-2458"
+"1868","Claude W Border","Winyates GreenRedditch","99518-2364",65.0,"F","married","pensioner",30.0,"YES","NO","YES","NO",29474.00,101868,"SAVINGS","border@blue.com","30510011111111","385-555-7747","505-555-9584","UG","225-555-692"
+"1869","Elsa Z Redeemar","Rehouse Industrial EstateAldridge","12754",62.0,"F","married","inactive",21.0,"NO","NO","YES","NO",1451.52,101869,"SAVINGS","elsa_redeemar@yahoo.com","38111111111119","615-555-6784","303-555-9400","SK","615-555-1005"
+"1869","Elsa Z Redeemar","Rehouse Industrial EstateAldridge","12754",62.0,"F","married","inactive",21.0,"NO","NO","YES","NO",3924.48,201869,"CHECKING","elsa_redeemar@yahoo.com","38111111111119","615-555-6784","303-555-9400","SK","615-555-1005"
+"1870","Lee K Blaker","Erdington Erdington Birmingham","34639",24.0,"F","single","inactive",3.0,"NO","YES","NO","YES",1442.00,101870,"SAVINGS","lblaker@web.de","3530 1113 3330 0000","775-555-7575",NULL,"CY",NULL
+"1871","Koganti U Barnes","West Point Business ParkAndover","85234-5783",82.0,"M","married","pensioner",27.0,"NO","YES","YES","NO",55122.00,101871,"CHECKING","barnes@ccdef.net","30210111161229","614-555-6316",NULL,"OM","kbarnes@blue.com"
+"1872","Lew R Newberry","RIVERSIDE WAYCAMBERLEY SURREY GU","86406-8150",69.0,"F","widowed","farmer",26.0,"NO","NO","YES","NO",5347.35,101872,"CHECKING","lew_newberry@t-online.de","4146 6643 9004 5458","609-555-8941","360-555-3874","CI","803-555-766"
+"1872","Lew R Newberry","RIVERSIDE WAYCAMBERLEY SURREY GU","86406-8150",69.0,"F","widowed","farmer",26.0,"NO","NO","YES","NO",14457.65,201872,"SAVINGS","lew_newberry@t-online.de","4146 6643 9004 5458","609-555-8941","360-555-3874","CI","803-555-766"
+"1873","Meir S Lewis","Parkeston QuayHarwich","87111-3601",19.0,"F","single","inactive",6.0,"NO","NO","NO","YES",19895.00,101873,"CHECKING","lewis@msn.com","4024007121595481","601-555-4923",NULL,"IE","505-555-4172"
+"1874","Paul Q Benz","Parkeston QuayHarwich","99169-1312",36.0,"M","married","farmer",18.0,"YES","NO","YES","NO",28831.00,101874,"CHECKING","paul_b@msn.com","36111111111111","573-555-669",NULL,"CM","860-555-2183"
+"1875","Clay G Molis","RedditchWorcester","27605-0601",39.0,"M","married","executives,self-employed",2.0,"NO","NO","YES","YES",7658.82,101875,"CHECKING","claymolis@aol.com","30210111161229","802-555-590",NULL,"GW","molis@yahoo.com"
+"1875","Clay G Molis","RedditchWorcester","27605-0601",39.0,"M","married","executives,self-employed",2.0,"NO","NO","YES","YES",20707.18,201875,"SAVINGS","claymolis@aol.com","30210111161229","802-555-590",NULL,"GW","molis@yahoo.com"
+"1876","Angie V Henning","WittonBirmingham","77041-5361",85.0,"F","widowed","inactive",24.0,"NO","NO","NO","YES",3052.00,101876,"CHECKING","ahenning@web.de","4024 0071 2159 5481","803-555-6074","916-555-3702","HR",NULL
+"1877","Rod H Walters","Barkby RoadLeicester","92799-1117",42.0,"F","married","employee",13.0,"NO","YES","YES","NO",-713.00,101877,"CHECKING","rwalters@yahoo.com","30011111111119","405-555-2925","860-555-7782","MV","804-555-3444"
+"1878","Lisa-Diane H Wight","Arlington Way Sundorne RetailShrewsbury","8086",35.0,"F","single","inactive",9.0,"NO","YES","NO","YES",136.89,101878,"CHECKING","wight@aol.com","5383908528354962","406-555-2473",NULL,"YT","603-555-7346"
+"1878","Lisa-Diane H Wight","Arlington Way Sundorne RetailShrewsbury","8086",35.0,"F","single","inactive",9.0,"NO","YES","NO","YES",370.11,201878,"SAVINGS","wight@aol.com","5383908528354962","406-555-2473",NULL,"YT","603-555-7346"
+"1879","Bizhan U Jacobus","Canal Road Canal Road Leeds","77087-4126",32.0,"M","divorced","worker",4.0,"NO","NO","NO","NO",2587.00,101879,"CHECKING","bizhan.jacobus@blue.com","5423111111111111","808-555-8766",NULL,"FK",NULL
+"1880","Walt J Brown","LOWER MIDDLETON STREETDERBYSHIRE","33710",67.0,"F","married","pensioner",28.0,"YES","NO","YES","NO",39500.00,101880,"CHECKING","brown@msn.com","3400 000000 00009","907-555-865","802-555-8355","JM","brown@cicn.gov"
+"1881","Lorretta Z Ard","Swiss CottageLondon","80104-3009",57.0,"M","married","craftsmen, storekeepers",22.0,"YES","NO","YES","NO",16964.91,101881,"SAVINGS","lorretta_ard@icloud.com","378282246310005","406-555-356",NULL,"AL","lorretta.ard@yahoo.com"
+"1881","Lorretta Z Ard","Swiss CottageLondon","80104-3009",57.0,"M","married","craftsmen, storekeepers",22.0,"YES","NO","YES","NO",45868.09,201881,"CHECKING","lorretta_ard@icloud.com","378282246310005","406-555-356",NULL,"AL","lorretta.ard@yahoo.com"
+"1882","Gino U Block","IDOTTSVAGEN 7SWEDEN","71134",75.0,"M","married","pensioner",2.0,"NO","NO","YES","YES",63397.00,101882,"SAVINGS","gblock@msl.org","5423111111111111","717-555-5871",NULL,"KG","919-555-4717"
+"1883","Bryon M Wheeldon","2 Tanners DriveMilton Keynes","85220-7002",67.0,"F","widowed","pensioner",15.0,"NO","NO","NO","NO",41921.00,101883,"SAVINGS","bryon.wheeldon@ccdef.net","4024007121595481","515-555-6215","406-555-3642","TJ","609-555-4110"
+"1884","Klaus I Trice","Witton P O BOX 660321 Birmingham","86404-2163",69.0,"M","married","pensioner",29.0,"NO","NO","YES","YES",8431.02,101884,"SAVINGS","ktrice@ibm.com","3528309518562063","515-555-3537","317-555-9500","BE","907-555-7606"
+"1884","Klaus I Trice","Witton P O BOX 660321 Birmingham","86404-2163",69.0,"M","married","pensioner",29.0,"NO","NO","YES","YES",22794.98,201884,"CHECKING","ktrice@ibm.com","3528309518562063","515-555-3537","317-555-9500","BE","907-555-7606"
+"1885","Wayne G Heltzel","TewkesburyGloucester","56502-1708",65.0,"F","single","employee",26.0,"NO","NO","NO","NO",5444.00,101885,"CHECKING",NULL,"5169 7990 9185 4334","843-555-3352",NULL,"BA","wayne_h@icloud.com"
+"1886","Almondo O Kurtz","Small HeathBirmingham","78642-6323",76.0,"M","married","pensioner",45.0,"NO","NO","YES","NO",32347.00,101886,"CHECKING","kurtz@gmx.net","5580977968891503","334-555-2083",NULL,"FO","615-555-3010"
+"1887","Dene F Hartwell","Barkby RoadLeicester","34475-5620",61.0,"F","married","inactive",21.0,"YES","NO","YES","NO",19445.67,101887,"CHECKING","dhartwell@msn.com","5383908528354962","717-555-8834","360-555-6582","FK","dene_hartwell@t-online.de"
+"1887","Dene F Hartwell","Barkby RoadLeicester","34475-5620",61.0,"F","married","inactive",21.0,"YES","NO","YES","NO",52575.33,201887,"SAVINGS","dhartwell@msn.com","5383908528354962","717-555-8834","360-555-6582","FK","dene_hartwell@t-online.de"
+"1888","Merion H Mccleery","Bellbrook ParkUckfield","56501-7002",67.0,"M","married","pensioner",19.0,"YES","NO","YES","YES",75194.00,101888,"CHECKING","merion.mccleery@blue.com",NULL,"608-555-8411",NULL,"PE","merion_mccleery@de.ibm.com"
+"1889","Marta W Cooley","Brickyard RoadWalsall","86004-3851",33.0,"M","single","worker",15.0,"YES","NO","NO","YES",11621.00,101889,"CHECKING","mcooley@web.de","5580977968891503","385-555-4985",NULL,"CZ","334-555-420"
+"1890","Viviana B Churchill","LAWFORD HEATH_IND. ESTATE RETURN TO AMY WATSON  CP651 RUGBY","34697-1348",58.0,"F","married","inactive",30.0,"NO","NO","YES","NO",1689.93,101890,"CHECKING","viviana.churchill@icloud.com","378282246310005","603-555-4856",NULL,"WF","303-555-1903"
+"1890","Viviana B Churchill","LAWFORD HEATH_IND. ESTATE RETURN TO AMY WATSON  CP651 RUGBY","34697-1348",58.0,"F","married","inactive",30.0,"NO","NO","YES","NO",4569.07,201890,"SAVINGS","viviana.churchill@icloud.com","378282246310005","603-555-4856",NULL,"WF","303-555-1903"
+"1891","Andrea K Aarestad","STAFFORD PARK 4SHROPSHIRE","32855-5433",32.0,"F","separated","employee",2.0,"NO","NO","NO","YES",1309.00,101891,"SAVINGS","andrea_aarestad@ibm.com","5169799091854334","603-555-8234",NULL,"TC","225-555-5233"
+"1892","Rexford A Yaupon","Castletown WaySunderland","86406-7339",20.0,"M","single","executives,self-employed",3.0,"NO","NO","NO","YES",14600.00,101892,"SAVINGS","rexford_yaupon@gmx.net","30011111111119","401-555-5678",NULL,"LR","919-555-875"
+"1893","Landen K Pugh","502 HONEYPOT LANESTANMORE MIDDLESEX","34446",46.0,"M","married","worker",21.0,"NO","YES","YES","YES",967.68,101893,"SAVINGS","landen_pugh@t-online.de","30011111111119","904-555-2332","401-555-5166","MR","401-555-2104"
+"1893","Landen K Pugh","502 HONEYPOT LANESTANMORE MIDDLESEX","34446",46.0,"M","married","worker",21.0,"NO","YES","YES","YES",2616.31,201893,"CHECKING","landen_pugh@t-online.de","30011111111119","904-555-2332","401-555-5166","MR","401-555-2104"
+"1894","Vikki I Okita","ABERCRAVE CAERBONTSWANSEA","29650-4724",43.0,"M","single","executives,self-employed",10.0,"NO","NO","NO","NO",-77716.00,101894,"SAVINGS","okita@blue.com","4024 0071 2159 5481","609-555-8714",NULL,"KP","503-555-6301"
+"1895","Herman N Falk","14B Bradford StreetShifnal","30024-7120",44.0,"F","married","inactive",9.0,"NO","YES","YES","NO",-857.00,101895,"SAVINGS","hfalk@gmx.net","5169-7990-9185-4334","850-555-3215",NULL,"TZ","falk@msl.org"
+"1896","Stafford S Kerr","3-5 swallow placwMayfair 3-5 swallow placwMayfair London","75391",35.0,"M","divorced","worker",6.0,"NO","YES","NO","NO",1615.41,101896,"SAVINGS","stafford_k@cicn.gov","213130951856200","850-555-8725","916-555-6026","ZA","stafford_k@aol.com"
+"1896","Stafford S Kerr","3-5 swallow placwMayfair 3-5 swallow placwMayfair London","75391",35.0,"M","divorced","worker",6.0,"NO","YES","NO","NO",4367.59,201896,"CHECKING","stafford_k@cicn.gov","213130951856200","850-555-8725","916-555-6026","ZA","stafford_k@aol.com"
+"1897","Bobbie J Loseke","27 Sandy LaneBirmingham","59911-6536",29.0,"M","single","worker",9.0,"NO","YES","NO","YES",8166.00,101897,"SAVINGS","bobbie_l@gmail.com","5462522444922689","614-555-4654","512-555-5420","HM","334-555-8923"
+"1898","Quincy L Garrigues","9300 NORMANDY BLVD BLDG 4JACKSONVILLE","32221-5522",73.0,"F","widowed","pensioner",20.0,"NO","NO","NO","NO",10496.00,101898,"SAVINGS","garrigues@cicn.gov","5423111111111111","225-555-5023",NULL,"AL","417-555-2304"
+"1899","Leigh L Collis","White Horse Business ParkTrowbridge","29063-9071",77.0,"M","married","pensioner",26.0,"NO","NO","NO","NO",2209.68,101899,"CHECKING","lcollis@ccdef.net","30510011111111","512-555-9659",NULL,"AQ","907-555-4455"
+"1899","Leigh L Collis","White Horse Business ParkTrowbridge","29063-9071",77.0,"M","married","pensioner",26.0,"NO","NO","NO","NO",5974.32,201899,"SAVINGS","lcollis@ccdef.net","30510011111111","512-555-9659",NULL,"AQ","907-555-4455"
+"1900","Ray V Cribbs","King Norton P O BOX 660321 Birmingham","32609-5626",40.0,"M","divorced","worker",6.0,"NO","NO","NO","YES",160434.00,101900,"CHECKING","ray.cribbs@de.ibm.com","4146-6643-9004-5458","907-555-1991","515-555-1231","TF","401-555-1061"
+"1901","Michael B Grossnickle","ALTHORP ROAD P O BOX 660321 LONDON","77304-3304",45.0,"M","married","executives,self-employed",19.0,"NO","NO","YES","YES",2185.00,101901,"CHECKING","mgrossnickle@de.ibm.com","4024007121595481","302-555-4068","651-555-4760","GW","518-555-5326"
+"1902","Ming Y Gibson","CROMWELL ROADCAMBS.","29169-4763",34.0,"F","single","employee",5.0,"NO","NO","NO","YES",1891.89,101902,"CHECKING","ming_g@web.de","5285696282092972","617-555-8956",NULL,"MU","334-555-8888"
+"1902","Ming Y Gibson","CROMWELL ROADCAMBS.","29169-4763",34.0,"F","single","employee",5.0,"NO","NO","NO","YES",5115.11,201902,"SAVINGS","ming_g@web.de","5285696282092972","617-555-8956",NULL,"MU","334-555-8888"
+"1903","Armond W Solerg","DARTMOUTH ROADWEST MIDLANDS","85706",61.0,"F","married","inactive",31.0,"NO","NO","YES","NO",142347.00,101903,"CHECKING","solerg@de.ibm.com","5169 7990 9185 4334","401-555-1267","401-555-2795","MQ","602-555-3463"
+"1904","Leonard W Cherella","3-5 SWALLOW PLACELondon","67501",75.0,"F","married","pensioner",27.0,"YES","NO","YES","NO",18529.00,101904,"CHECKING","leonardcherella@blue.com","5285696282092972","402-555-8627","225-555-9326","IT","402-555-133"
+"1905","Lorie S Davis","Church RoadBristol","33920-3837",84.0,"M","married","pensioner",27.0,"YES","NO","YES","NO",5002.83,101905,"CHECKING","lorie_d@cicn.gov","3400 000000 00009","307-555-2746",NULL,"SN","lorie_davis@web.de"
+"1905","Lorie S Davis","Church RoadBristol","33920-3837",84.0,"M","married","pensioner",27.0,"YES","NO","YES","NO",13526.17,201905,"SAVINGS","lorie_d@cicn.gov","3400 000000 00009","307-555-2746",NULL,"SN","lorie_davis@web.de"
+"1906","Cleatus T Ratajczak","183 Great Howard StreetLiverpool","77429",40.0,"F","married","intermediate professions",3.0,"NO","NO","YES","NO",2407.00,101906,"CHECKING","cratajczak@ibm.com","5169799091854334","850-555-4948",NULL,"UZ","515-555-3421"
+"1907","Ann N Wiles","Springlakes Industrial EstateAldershot","85745",33.0,"M","single","intermediate professions",13.0,"NO","YES","NO","YES",-1366.00,101907,"CHECKING",NULL,"180030951856201","503-555-5460",NULL,"VU","ann_w@aol.com"
+"1908","Loyd E Devlin","Wood Lane EndHemel Hempstead","86403-9368",46.0,"F","single","inactive",14.0,"YES","NO","NO","YES",180.63,101908,"CHECKING","loyddevlin@gmail.com","340000000000009","904-555-106","302-555-6563","MA","401-555-4405"
+"1908","Loyd E Devlin","Wood Lane EndHemel Hempstead","86403-9368",46.0,"F","single","inactive",14.0,"YES","NO","NO","YES",488.37,201908,"SAVINGS","loyddevlin@gmail.com","340000000000009","904-555-106","302-555-6563","MA","401-555-4405"
+"1909","Bee U Mitchell","120 VYSE STREETBIRMINGHAM","63043",30.0,"M","single","employee",11.0,"YES","NO","NO","YES",4265.00,101909,"SAVINGS","bee.mitchell@aol.com","4146664390045458","615-555-2379",NULL,"MK","bee.mitchell@t-online.de"
+"1910","Roth B Dismukes","Great YarmouthNorfolk","29407",23.0,"F","single","inactive",4.0,"NO","YES","NO","YES",-233.00,101910,"SAVINGS","roth.dismukes@aol.com","4146664390045458","615-555-8744","802-555-8004","AG","775-555-4373"
+"1911","Henning P Olson","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27613-1103",21.0,"F","single","inactive",3.0,"NO","NO","NO","YES",718.47,101911,"SAVINGS","henning_o@aol.com","30510011111111","402-555-9830",NULL,"SO","501-555-8264"
+"1911","Henning P Olson","COLISEUM BUSINESS CENTRE REVERCAMBERLEY SURREY","27613-1103",21.0,"F","single","inactive",3.0,"NO","NO","NO","YES",1942.53,201911,"CHECKING","henning_o@aol.com","30510011111111","402-555-9830",NULL,"SO","501-555-8264"
+"1912","Cathrine G Subramaniam","35 Livery StreetBirmingham","46038",28.0,"F","single","inactive",17.0,"YES","NO","NO","YES",729.00,101912,"SAVINGS","subramaniam@cicn.gov","5169 7990 9185 4334","385-555-7570",NULL,"CH","850-555-132"
+"1913","Sally D Fagan","Breakspear WayHemel Hempstead","85215-1111",22.0,"M","single","inactive",11.0,"NO","NO","NO","YES",2192.00,101913,"SAVINGS","sfagan@gmx.net","5169799091854334","202-555-1870","916-555-2217","CO","843-555-2050"
+"1914","Doyle I Davidson","Trafalgar WayCamberley","10011",71.0,"M","married","pensioner",11.0,"NO","NO","NO","YES",25615.71,101914,"SAVINGS","doyle_d@aol.com","3528-3095-1856-2063","775-555-4616",NULL,"CX",NULL
+"1914","Doyle I Davidson","Trafalgar WayCamberley","10011",71.0,"M","married","pensioner",11.0,"NO","NO","NO","YES",69257.29,201914,"CHECKING","doyle_d@aol.com","3528-3095-1856-2063","775-555-4616",NULL,"CX",NULL
+"1915","Nboc L Erwin","Park Farm Industrial EstateWellingborough","34761",23.0,"F","single","inactive",4.0,"NO","NO","NO","YES",7991.00,101915,"SAVINGS","nboc.erwin@cicn.gov","5580977968891503","850-555-4072",NULL,"HN","302-555-4216"
+"1916","Henning S Willey","LympheHythe","78666",74.0,"F","married","pensioner",22.0,"YES","NO","YES","YES",1263.00,101916,"SAVINGS","willey@aol.com","4024-0071-2159-5481","502-555-7834","515-555-3800","IS","henning_willey@icloud.com"
+"1917","Cathi S Baner","UrmstonManchester","77535-3014",45.0,"M","married","worker",23.0,"NO","NO","YES","YES",1258.47,101917,"SAVINGS","baner@cicn.gov","5169-7990-9185-4334","417-555-5700","505-555-9175","GH","803-555-4495"
+"1917","Cathi S Baner","UrmstonManchester","77535-3014",45.0,"M","married","worker",23.0,"NO","NO","YES","YES",3402.52,201917,"CHECKING","baner@cicn.gov","5169-7990-9185-4334","417-555-5700","505-555-9175","GH","803-555-4495"
+"1918","Lona V Hutts","Dinwall RoadCroydon","77388",44.0,"M","married","intermediate professions",15.0,"NO","NO","YES","NO",3199.00,101918,"SAVINGS","hutts@gmx.net","38111111111119","515-555-4756",NULL,"KR","785-555-6344"
+"1919","Shaye M Kardell","Small HeathBirmingham","86406-7746",50.0,"M","married","farmer",27.0,"NO","NO","NO","YES",18134.00,101919,"SAVINGS","kardell@de.ibm.com","6220264390045758","385-555-3017","609-555-2609","GY","907-555-5742"
+"1920","Aldona T Sonido","GolborneWarrington","33409-5274",23.0,"F","single","inactive",5.0,"NO","YES","NO","YES",59.94,101920,"SAVINGS","aldona.sonido@icloud.com","378282246310005","517-555-8650",NULL,"IN","850-555-2694"
+"1920","Aldona T Sonido","GolborneWarrington","33409-5274",23.0,"F","single","inactive",5.0,"NO","YES","NO","YES",162.06,201920,"CHECKING","aldona.sonido@icloud.com","378282246310005","517-555-8650",NULL,"IN","850-555-2694"
+"1921","Michelle I Duzenack","ASTON HALL RoadBIRMINGHAM","28270",81.0,"F","widowed","pensioner",3.0,"NO","NO","NO","YES",8585.00,101921,"CHECKING","michelle_duzenack@msn.com","5423111111111111","402-555-3653",NULL,"IN","803-555-5560"
+"1922","Sheldon J Peter","SMETHWICK WARLEYWEST MIDLANDS","30308-2101",57.0,"F","married","employee",20.0,"NO","NO","NO","NO",8232.00,101922,"CHECKING","sheldon_p@msl.org","30411111111111","307-555-8834","505-555-2924","CO",NULL
+"1923","Kyong-Ok Y Lord","Round SpinneyNorthampton","28247",40.0,"M","married","worker",18.0,"NO","NO","YES","YES",1166.13,101923,"CHECKING","kyong-ok_lord@t-online.de","3528309518562063","404-555-4752","202-555-5436","GT","515-555-9061"
+"1923","Kyong-Ok Y Lord","Round SpinneyNorthampton","28247",40.0,"M","married","worker",18.0,"NO","NO","YES","YES",3152.87,201923,"SAVINGS","kyong-ok_lord@t-online.de","3528309518562063","404-555-4752","202-555-5436","GT","515-555-9061"
+"1924","Joann L Adachi","10 Mordaunt RoadLondon","13214-1852",31.0,"M","cohabitant","employee",1.0,"NO","YES","NO","YES",-21.00,101924,"CHECKING","joann_adachi@aol.com","30411111111111","417-555-2980",NULL,"PA","jadachi@icloud.com"
+"1925","Bev B Pelaez","Breakspear WayHemel Hempstead","30152-3883",35.0,"F","married","employee",14.0,"NO","YES","YES","NO",16677.00,101925,"CHECKING","pelaez@ccdef.net","180030951856201","334-555-9054",NULL,"MU",NULL
+"1926","Gunther I Laninga","Winyates GreenRedditch","80216",30.0,"M","single","worker",10.0,"NO","YES","NO","NO",552.96,101926,"CHECKING","laninga@aol.com","5462522444922689","401-555-4318","303-555-2960","MS","334-555-8299"
+"1926","Gunther I Laninga","Winyates GreenRedditch","80216",30.0,"M","single","worker",10.0,"NO","YES","NO","NO",1495.04,201926,"SAVINGS","laninga@aol.com","5462522444922689","401-555-4318","303-555-2960","MS","334-555-8299"
+"1927","Nathaniel I Campoy","West GateLondon","33155-4637",64.0,"F","widowed","pensioner",19.0,"NO","NO","NO","YES",24376.00,101927,"SAVINGS","campoy@gmail.com","4146664390045458","515-555-7934","502-555-3977","CV","ncampoy@gmx.net"
+"1928","Ralston M Kortepeter","10 MORDAUNT ROADLONDON","85277-1028",57.0,"M","single","worker",9.0,"NO","NO","NO","YES",21414.00,101928,"SAVINGS","kortepeter@cicn.gov","5520111111111121","503-555-800","907-555-4655","PF","406-555-824"
+"1929","Jerald O Thurman","10 MORDAUNT ROADLONDON","32086",53.0,"M","married","craftsmen, storekeepers",17.0,"YES","NO","YES","NO",-654.48,101929,"SAVINGS",NULL,"4146 6643 9004 5458","808-555-7447",NULL,"SJ","jerald_thurman@aol.com"
+"1929","Jerald O Thurman","10 MORDAUNT ROADLONDON","32086",53.0,"M","married","craftsmen, storekeepers",17.0,"YES","NO","YES","NO",-1769.52,201929,"CHECKING",NULL,"4146 6643 9004 5458","808-555-7447",NULL,"SJ","jerald_thurman@aol.com"
+"1930","Elliot F Young","Boyatt WoodEastleigh","32773",74.0,"F","widowed","inactive",0.0,"NO","NO","NO","YES",24911.00,101930,"SAVINGS","eyoung@t-online.de","5580977968891503","503-555-6513",NULL,"IE","603-555-4890"
+"1931","Brantley G Staton","Willen LakeMilton Keynes","33845",73.0,"F","divorced","pensioner",1.0,"NO","NO","NO","NO",1303.00,101931,"SAVINGS","brantleystaton@t-online.de","5462522444922689","701-555-1567",NULL,"TJ","615-555-772"
+"1932","Emelia X Read","148 Edmund StreetBirmingham","77255-5649",57.0,"F","married","worker",19.0,"NO","NO","YES","NO",9569.88,101932,"SAVINGS","emelia_read@ibm.com","5423111111111111","609-555-6517",NULL,"FX","785-555-1054"
+"1932","Emelia X Read","148 Edmund StreetBirmingham","77255-5649",57.0,"F","married","worker",19.0,"NO","NO","YES","NO",25874.12,201932,"CHECKING","emelia_read@ibm.com","5423111111111111","609-555-6517",NULL,"FX","785-555-1054"
+"1933","Marian E Grelle","Trafalgar WayCamberley","99163-0306",49.0,"M","married","farmer",25.0,"YES","NO","NO","YES",39003.00,101933,"SAVINGS","mgrelle@ibm.com","6011567891012132","402-555-4502",NULL,"MD","marian_g@yahoo.com"
+"1934","Rosalie I Perillo","HatfieldHertfordshire","77080-8111",76.0,"F","widowed","pensioner",10.0,"NO","YES","NO","YES",32729.00,101934,"SAVINGS","rosalie_perillo@gmail.com","30011111111119","515-555-2600","505-555-5425","CF","307-555-2660"
+"1935","Mel R Polsky","Small HeathBirmingham","29577-6688",82.0,"F","married","pensioner",1.0,"NO","NO","YES","NO",7488.72,101935,"SAVINGS","mel_p@gmx.net","30310111161029","334-555-562",NULL,"TV","601-555-2648"
+"1935","Mel R Polsky","Small HeathBirmingham","29577-6688",82.0,"F","married","pensioner",1.0,"NO","NO","YES","NO",20247.28,201935,"CHECKING","mel_p@gmx.net","30310111161029","334-555-562",NULL,"TV","601-555-2648"
+"1936","Kellie P Hans","WimborneDorset","34471",72.0,"M","married","pensioner",12.0,"NO","NO","YES","NO",75368.00,101936,"SAVINGS","kellie_h@gmx.net","30510011111111","808-555-9693",NULL,"MO","401-555-4063"
+"1937","Mila B Cain","LOWER MIDDLETON STREETDERBYSHIRE","77375",59.0,"M","married","worker",19.0,"YES","NO","YES","NO",8991.00,101937,"SAVINGS","mila_cain@msl.org",NULL,"614-555-4014","651-555-785","HT","417-555-3100"
+"1938","Tyrell T Biltucci","----------Worthing","86325-0999",52.0,"M","single","worker",16.0,"NO","NO","NO","YES",3316.95,101938,"SAVINGS","tbiltucci@cicn.gov","6011567891012132","317-555-1748","617-555-8293","VI","202-555-3068"
+"1938","Tyrell T Biltucci","----------Worthing","86325-0999",52.0,"M","single","worker",16.0,"NO","NO","NO","YES",8968.05,201938,"CHECKING","tbiltucci@cicn.gov","6011567891012132","317-555-1748","617-555-8293","VI","202-555-3068"
+"1939","Chas Y Putz","Lymphe Lymphe Hythe","86406-7533",23.0,"F","single","employee",0.0,"NO","YES","NO","YES",75.00,101939,"SAVINGS","chas_p@de.ibm.com","5169 7990 9185 4334","515-555-6514","360-555-3130","MX",NULL
+"1940","Kenneth O Wills","60 FREDERICK STREETBIRMINGHAM","32333",81.0,"F","widowed","pensioner",13.0,"NO","NO","NO","NO",25606.00,101940,"SAVINGS","kenneth_w@de.ibm.com","5383908528354962","503-555-8593","785-555-538","KE",NULL
+"1941","Jairo M Alva","BEDLINGTONNORTHUMBERLAND","77429-7078",72.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",3687.39,101941,"SAVINGS","alva@aol.com","5580977968891503","417-555-9197",NULL,"IS","334-555-3149"
+"1941","Jairo M Alva","BEDLINGTONNORTHUMBERLAND","77429-7078",72.0,"M","married","pensioner",26.0,"YES","NO","YES","NO",9969.61,201941,"CHECKING","alva@aol.com","5580977968891503","417-555-9197",NULL,"IS","334-555-3149"
+"1942","Mohammed V Dieson","WittonBirmingham","33316-4109",71.0,"M","married","inactive",21.0,"YES","NO","YES","NO",28431.00,101942,"SAVINGS","mdieson@ibm.com","6520224090045455","505-555-4586","904-555-367","MK","802-555-3024"
+"1943","Fergus N Gray","DARNALL ROADSHEFFIELD","80014",67.0,"F","married","craftsmen, storekeepers",8.0,"NO","NO","YES","YES",160193.00,101943,"SAVINGS","fgray@web.de","36111111111111","406-555-8339",NULL,"CA","fergusgray@aol.com"
+"1944","Ferrell B Garrett","Berkswell RoadMeriden","78408",74.0,"M","married","pensioner",31.0,"YES","NO","YES","YES",8044.92,101944,"CHECKING","ferrell_g@msl.org","30510011111111","617-555-2208",NULL,"SM","334-555-4449"
+"1944","Ferrell B Garrett","Berkswell RoadMeriden","78408",74.0,"M","married","pensioner",31.0,"YES","NO","YES","YES",21751.07,201944,"SAVINGS","ferrell_g@msl.org","30510011111111","617-555-2208",NULL,"SM","334-555-4449"
+"1945","Vashti E Dobson","Breakspear WayHemel Hempstead","33139-3031",69.0,"F","married","pensioner",29.0,"YES","NO","YES","NO",208912.00,101945,"CHECKING","vdobson@cicn.gov","340000000000009","803-555-4490","405-555-1959","AL","605-555-1796"
+"1946","Zana O Corona","ROUGH HEY ROADS ROUGH HEY ROADS LANCS.","33908",76.0,"F","married","inactive",35.0,"NO","NO","YES","NO",47479.00,101946,"CHECKING","zana_corona@icloud.com","5520111111111121","405-555-1097",NULL,"LA","405-555-2358"
+"1947","Debbie Q Dupier","RIVERSIDE WAY RIVERSIDE WAY CAMBERLEY SURREY GU","85299",23.0,"M","single","inactive",11.0,"NO","NO","NO","YES",2437.02,101947,"CHECKING","debbie_dupier@msl.org","38111111111119","617-555-5157","602-555-2995","PL","803-555-9376"
+"1947","Debbie Q Dupier","RIVERSIDE WAY RIVERSIDE WAY CAMBERLEY SURREY GU","85299",23.0,"M","single","inactive",11.0,"NO","NO","NO","YES",6588.98,201947,"SAVINGS","debbie_dupier@msl.org","38111111111119","617-555-5157","602-555-2995","PL","803-555-9376"
+"1948","Fulton Q Marx","----------WARWICK","34292",32.0,"M","single","worker",12.0,"NO","YES","NO","YES",13509.00,101948,"CHECKING",NULL,"4024-0071-2159-5481","808-555-1439","317-555-7266","LC","605-555-9650"
+"1949","Cheri I Griego","6/8 HIGH STREETCHESHIRE","99515-2051",46.0,"F","single","employee",2.0,"NO","NO","NO","YES",4953.00,101949,"CHECKING","cheri_griego@msl.org","6011567891012132","505-555-7297",NULL,"RE","cheri.griego@ccdef.net"
+"1950","Shelley H Saarey","3322 PALMTREE DRLAKE HAVASU CITY AZ","86404-1623",32.0,"M","married","worker",6.0,"YES","NO","YES","YES",-92.88,101950,"SAVINGS","shelleysaarey@web.de","4146-6643-9004-5458","208-555-6851",NULL,"BW","385-555-157"
+"1950","Shelley H Saarey","3322 PALMTREE DRLAKE HAVASU CITY AZ","86404-1623",32.0,"M","married","worker",6.0,"YES","NO","YES","YES",-251.12,201950,"CHECKING","shelleysaarey@web.de","4146-6643-9004-5458","208-555-6851",NULL,"BW","385-555-157"
+"1951","Jennifer Q Nettles","14216 HWY 90 WDEFUNIAK SPRINGS FL","32433",71.0,"M","widowed","pensioner",26.0,"NO","NO","NO","NO",48050.00,101951,"SAVINGS","jennifer_n@web.de","3530 1113 3330 0000","302-555-4412",NULL,"BW","503-555-2627"
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties
new file mode 100755
index 0000000..2bf7347
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/sample-data-toc.properties
@@ -0,0 +1,17 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This file contains a list of the ODF sample data files
+simple-example-table.csv=
+simple-example-document.txt=
+bank-clients-short.csv=
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt
new file mode 100755
index 0000000..6bdeca2
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-document.txt
@@ -0,0 +1 @@
+This is a simple example text.
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv
new file mode 100755
index 0000000..adbd1ab
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/sampledata/simple-example-table.csv
@@ -0,0 +1,4 @@
+OMColumnName1,OMColumnName2
+aaaa,1
+bbbb,2
+cccc,3
diff --git a/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt
new file mode 100755
index 0000000..48d6e85
--- /dev/null
+++ b/odf/odf-core/src/main/resources/org/apache/atlas/odf/core/odfversion.txt
@@ -0,0 +1 @@
+1.2.0-SNAPSHOT
\ No newline at end of file
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java
new file mode 100755
index 0000000..587ae30
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreBase.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
+import org.apache.atlas.odf.core.configuration.ConfigManager;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1;
+
+public class ODFAPITestWithMetadataStoreBase extends ODFTestBase {
+
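+	// reset the metadata store and re-create the sample data so every test starts from a known state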
+	@Before
+	public void createSampleData() throws Exception {
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		mds.resetAllData();
+		mds.createSampleData();
+	}
+
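+	// register the test discovery services from the initial configuration JSON once per test class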
+	@BeforeClass
+	public static void registerServices() throws Exception {
+		ConfigContainer config = JSONUtils.readJSONObjectFromFileInClasspath(ConfigContainer.class, "org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json",
+				ODFAPITestWithMetadataStoreBase.class.getClassLoader());
+		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
+		configManager.updateConfigContainer(config);
+	}
+
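+	// search for DataFile objects and return at most five of them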
+	protected List<MetaDataObjectReference> getTables(MetadataStore mds) {
+		List<MetaDataObjectReference> dataSets = mds.search(mds.newQueryBuilder().objectType("DataFile").build());
+		Assert.assertTrue(dataSets.size() > 0);
+		// use at most 5 data sets to keep the test fast
+		final int MAX_DATASETS = 5;
+		if (dataSets.size() > MAX_DATASETS) {
+			dataSets = dataSets.subList(0, MAX_DATASETS);
+		}
+		return dataSets;
+	}
+
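+	// start an analysis request for the given service and data sets, poll until the request leaves the QUEUED/ACTIVE states, and assert the expected final state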
+	public String test(String dsId, List<MetaDataObjectReference> dataSets, AnalysisRequestStatus.State expectedFinalState, boolean requestIsInvalid, String correlationId) throws Exception {
+		log.log(Level.INFO, "Testing ODF with metadata store. Discovery service Id: {0}, dataSets: {1}, expected state: {2}, correlationId: {3}, should request be invalid: {4}", new Object[] { dsId,
+				dataSets, expectedFinalState, correlationId, requestIsInvalid });
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		Assert.assertTrue(dataSets.size() > 0);
+
+		Assert.assertNotNull(mds);
+		AnalysisRequest request = new AnalysisRequest();
+		request.setDiscoveryServiceSequence(Collections.singletonList(dsId));
+		request.setDataSets(dataSets);
+		Map<String, Object> additionalProps = new HashMap<String, Object>();
+		additionalProps.put(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID, correlationId);
+		request.setAdditionalProperties(additionalProps);
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		AnalysisResponse resp = analysisManager.runAnalysis(request);
+
+		log.info("Analysis started on data sets: " + dataSets + ", response: " + JSONUtils.toJSON(resp));
+		log.info("Response message: " + resp.getDetails());
+		if (requestIsInvalid) {
+			Assert.assertTrue(resp.isInvalidRequest());
+			return null;
+		}
+
+		Assert.assertFalse(resp.isInvalidRequest());
+		String id = resp.getId();
+		AnalysisRequestStatus status = null;
+		int maxPolls = 100;
+		do {
+			status = analysisManager.getAnalysisRequestStatus(id);
+			log.log(Level.INFO, "Poll request for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
+					expectedFinalState });
+			maxPolls--;
+			Thread.sleep(1000);
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED));
+		log.log(Level.INFO, "Expected state: {0}, actual state: {1}", new Object[] { expectedFinalState, status.getState() });
+		Assert.assertEquals(expectedFinalState, status.getState());
+		return resp.getId();
+	}
+
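+	// verify that each annotation type occurs at most once among the most recent annotations and that they are a subset of all annotations of the object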
+	public void checkMostRecentAnnotations(MetadataStore mds, AnnotationStore as, MetaDataObjectReference ref) {
+		Map<MetaDataObjectReference, MetaDataObject> ref2Retrieved = new HashMap<>();
+		for (Annotation annot : as.getAnnotations(ref, null)) {
+			ref2Retrieved.put(annot.getReference(), annot);
+		}
+
+		List<Annotation> mostRecentAnnotations = AnnotationStoreUtils.getMostRecentAnnotationsByType(as, ref);
+		Assert.assertNotNull(mostRecentAnnotations);
+		Assert.assertTrue(mostRecentAnnotations.size() <= ref2Retrieved.size());
+		Set<MetaDataObjectReference> mostRecentAnnotationRefs = new HashSet<>();
+		Set<String> annotationTypes = new HashSet<>();
+		for (Annotation annot : mostRecentAnnotations) {
+			// every annotation type occurs at most once
+			Assert.assertFalse(annotationTypes.contains(annot.getAnnotationType()));
+			mostRecentAnnotationRefs.add(annot.getReference());
+			annotationTypes.add(annot.getAnnotationType());
+		}
+
+		// all most recent annotations are a subset of all annotations
+		Assert.assertTrue(ref2Retrieved.keySet().containsAll(mostRecentAnnotationRefs));
+
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java
new file mode 100755
index 0000000..f0742aa
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreExtendedAnnotations.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.MyObject;
+import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.MyOtherObject;
+import org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations.SyncDiscoveryServiceAnnotation;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ODFAPITestWithMetadataStoreExtendedAnnotations extends ODFAPITestWithMetadataStoreBase {
+
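+	// verify that the typed (extended) annotations written by the test service can be read back with their nested properties intact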
+	@Test
+	public void testSuccessSyncExtendedAnnotations() throws Exception {
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+		List<MetaDataObjectReference> dataSets = getTables(mds);
+		String dsID = "synctestservice-with-extendedannotations";
+
+		String requestId = test(dsID, dataSets, State.FINISHED, false, null);
+
+		log.info("Checking if extended annotations exist for request ID: " + requestId);
+		for (MetaDataObjectReference dataSet : dataSets) {
+			List<SyncDiscoveryServiceAnnotation> annotations = new ArrayList<>();
+			List<Annotation> annots = as.getAnnotations(dataSet, null);
+			Assert.assertTrue(annots.size() >= 2);
+			
+			for (Annotation annot : annots) {		
+				Assert.assertNotNull(annot);
+				if (annot.getAnalysisRun().equals(requestId)) {
+					log.info("Found annotation: " + annot + ", json: " + JSONUtils.toJSON(annot));
+					Assert.assertNotNull(annot);
+					Assert.assertEquals(SyncDiscoveryServiceAnnotation.class, annot.getClass());
+					SyncDiscoveryServiceAnnotation extAnnot = (SyncDiscoveryServiceAnnotation) annot;
+					Assert.assertNotNull(extAnnot.getProp1());
+					Assert.assertEquals(extAnnot.getProp1().hashCode(), extAnnot.getProp2());
+					MyObject mo = extAnnot.getProp3();
+					Assert.assertNotNull(mo);
+					Assert.assertEquals("nested" + extAnnot.getProp1(), mo.getAnotherProp());
+					
+					MyOtherObject moo = mo.getYetAnotherProp();
+					Assert.assertNotNull(moo);
+					Assert.assertEquals("nestedtwolevels" + extAnnot.getProp1(), moo.getMyOtherObjectProperty());
+					annotations.add(extAnnot);
+				}
+			}
+			Assert.assertEquals(2, annotations.size());
+			// TODO check annotations list
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java
new file mode 100755
index 0000000..e47b316
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreJsonAnnotation.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest;
+
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ODFAPITestWithMetadataStoreJsonAnnotation extends ODFAPITestWithMetadataStoreBase {
+
+	Logger logger = ODFTestLogger.get();
+
+	String expectedJson = Utils.getInputStreamAsString(this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json"), "UTF-8");
+
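+	// verify that the JSON properties of the annotations written by the test service match the expected JSON document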
+	@Test
+	public void testSuccessSyncJsonAnnotations() throws Exception {
+
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+		List<MetaDataObjectReference> dataSets = getTables(mds);
+		String dsID = "synctestservice-with-json-annotations";
+
+		String requestId = test(dsID, dataSets, State.FINISHED, false, null);
+
+		log.info("Checking if annotations exist for request ID: " + requestId);
+		int numMatchingAnnotations = 0;
+		for (MetaDataObjectReference dataSet : dataSets) {
+			List<Annotation> annotationRefs = as.getAnnotations(dataSet, null);
+			Assert.assertTrue(annotationRefs.size() >= 1);
+			for (Annotation annot : annotationRefs) {
+				Assert.assertNotNull(annot);
+				if (annot.getAnalysisRun().equals(requestId)) {
+					log.info("Found annotation: " + annot + ", json: " + JSONUtils.toJSON(annot));
+					Assert.assertNotNull(annot);
+					String jsonProperties = annot.getJsonProperties();
+					Assert.assertNotNull(jsonProperties);
+					logger.info("Actual annotation string: " + jsonProperties + ". Expected json: " + expectedJson);
+					Assert.assertEquals(expectedJson, jsonProperties);
+					numMatchingAnnotations++;
+				}
+			}
+//			Assert.assertEquals(1, numMatchingAnnotations);
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java
new file mode 100755
index 0000000..6b7c9b9
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/ODFAPITestWithMetadataStoreSimple.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+
+public class ODFAPITestWithMetadataStoreSimple extends ODFAPITestWithMetadataStoreBase {
+
+	public ODFAPITestWithMetadataStoreSimple() {
+		ODFTestBase.log.info("Classpath: " + System.getProperty("java.class.path"));
+	}
+
+	@Test
+	public void testSuccessASync() throws Exception {
+		testSuccess("asynctestservice-with-annotations");
+	}
+
+	@Test
+	public void testSuccessSync() throws Exception {
+		testSuccess("synctestservice-with-annotations");
+	}
+
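+	// run an analysis with a unique correlation ID and verify that all annotations written by the test service arrive in the annotation store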
+	void testSuccess(String dsId) throws Exception {
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+		List<MetaDataObjectReference> dataSets = getTables(mds);
+
+		String correlationId = UUID.randomUUID().toString();
+		
+		String requestId = test(dsId, dataSets, AnalysisRequestStatus.State.FINISHED, false, correlationId);
+		Thread.sleep(3000); // give time for notifications to arrive
+
+		List<MetaDataObjectReference> annotationsOfThisRun = new ArrayList<>();
+		
+		ODFTestBase.log.info("Checking if annotations exist");
+		for (MetaDataObjectReference dataSet : dataSets) {
+			List<Annotation> retrievedAnnotations = as.getAnnotations(dataSet, null);
+			Assert.assertTrue(retrievedAnnotations.size() > 0);
+			List<Annotation> annotations = new ArrayList<>();
+			for (Annotation annot : retrievedAnnotations) {
+				Assert.assertNotNull(annot);
+				Assert.assertNotNull(annot.getAnalysisRun());
+				if (annot.getAnalysisRun().equals(requestId)) {
+					annotationsOfThisRun.add(annot.getReference());
+					Assert.assertNotNull(annot.getJsonProperties());
+					JSONObject props = new JSONObject(annot.getJsonProperties());
+					if (props != null) {
+						String annotCorrId = (String) props.get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID);
+						if (annotCorrId != null) {
+							Assert.assertNotNull(annot.getAnnotationType());
+						}
+					}
+					annotations.add(annot);
+				}
+			}
+			ODFTestBase.log.info("Checking that annotation notifications were received");
+			// check that we got notified of all annotations
+			
+			// at a minimum, the annotations written by the test service must have been created
+			Assert.assertTrue(TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations() <= annotations.size());
+			int found = 0;
+			for (int i = 0; i < TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations(); i++) {
+				String[] annotValues = TestSyncDiscoveryServiceWritingAnnotations1.getPropsOfNthAnnotation(i);
+				for (Annotation annotation : annotations) {
+					if (annotation.getAnnotationType() != null) {
+						if (annotation.getAnnotationType().equals(annotValues[0])) {
+							JSONObject jo = new JSONObject(annotation.getJsonProperties());
+							String foundCorrelationId = (String) jo.get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID);
+							// only look at those where the correlation ID property is set
+							if (correlationId.equals(foundCorrelationId)) {
+								String val = (String) jo.get(annotValues[1]);
+								Assert.assertEquals(annotValues[2], val);
+								Assert.assertEquals(requestId, annotation.getAnalysisRun());
+								// annotation types and the JSON properties match
+								found++;
+							}
+						}
+					}
+				}
+			}
+			// assert that we have found all and not more
+			Assert.assertEquals(TestSyncDiscoveryServiceWritingAnnotations1.getNumberOfAnnotations(), found);
+
+			checkMostRecentAnnotations(mds, new ODFFactory().create().getAnnotationStore(), dataSet);
+		}
+	}
+
+	@Test
+	public void testFailureASync() throws Exception {
+		testFailure("asynctestservice-with-annotations");
+	}
+
+	@Test
+	public void testFailureSync() throws Exception {
+		testFailure("synctestservice-with-annotations");
+	}
+
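+	// an analysis request on a non-existing metadata object reference must be rejected as invalid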
+	void testFailure(String dsId) throws Exception {
+		MetaDataObjectReference invalidRef = new MetaDataObjectReference();
+		invalidRef.setId("error-this-is-hopefully-an-invalid-id");
+		List<MetaDataObjectReference> dataSets = Collections.singletonList(invalidRef);
+		test(dsId, dataSets, AnalysisRequestStatus.State.ERROR, true, UUID.randomUUID().toString());
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java
new file mode 100755
index 0000000..af70b5a
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/connectivity/DataSetRetrieverTest.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.connectivity;
+
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.connectivity.DataSetRetriever;
+import org.apache.atlas.odf.api.connectivity.DataSetRetrieverImpl;
+import org.apache.atlas.odf.api.connectivity.JDBCRetrievalResult;
+import org.apache.atlas.odf.api.discoveryservice.datasets.MaterializedDataSet;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.integrationtest.metadata.importer.JDBCMetadataImporterTest;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+
+public class DataSetRetrieverTest extends ODFTestBase {
+
+	static Logger logger = ODFTestLogger.get();
+	
+	static MetadataStore createMetadataStore() throws Exception {
+		return new ODFFactory().create().getMetadataStore();
+	}
+	
+	@BeforeClass
+	public static void setupImport() throws Exception {
+		MetadataStore mds = createMetadataStore();
+		// create sample data only if it has not been created yet
+		mds.createSampleData();
+		JDBCMetadataImporterTest.runTestImport(mds);
+	}
+	
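+	// materialize the first retrievable table and cross-check the JDBC result set against the materialized data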
+	@Test
+	public void testDataSetRetrievalJDBC() throws Exception {
+		MetadataStore ams = createMetadataStore();
+		DataSetRetriever retriever = new DataSetRetrieverImpl(ams);
+		List<MetaDataObjectReference> refs = ams.search(ams.newQueryBuilder().objectType("Table").build());
+		Assert.assertTrue(refs.size() > 0);
+		int retrievedDataSets = 0;
+		for (MetaDataObjectReference ref : refs) {
+			Table table = (Table) ams.retrieve(ref);
+			logger.info("Retrieving table: " + table.getName() + ", " + table.getReference().getUrl());
+			if (retriever.canRetrieveDataSet(table)) {
+				retrievedDataSets++;
+				MaterializedDataSet mds = retriever.retrieveRelationalDataSet(table);
+				Assert.assertNotNull(mds);
+				Assert.assertEquals(table, mds.getTable());
+				int numberOfColumns = ams.getColumns(table).size();
+				Assert.assertEquals(numberOfColumns, mds.getColumns().size());
+				Assert.assertNotNull(mds.getData());
+				Assert.assertTrue(mds.getData().size() > 0);
+				for (List<Object> row : mds.getData()) {
+					Assert.assertEquals(numberOfColumns, row.size());
+				}
+				
+				// now test JDBC method
+				JDBCRetrievalResult jdbcResult = retriever.retrieveTableAsJDBCResultSet(table);
+				ResultSet rs = jdbcResult.getPreparedStatement().executeQuery();
+				Assert.assertEquals(mds.getColumns().size(), rs.getMetaData().getColumnCount());
+				int count = 0;
+				while (rs.next()) {
+					count++;
+				}
+				Assert.assertEquals(mds.getData().size(), count);
+				
+				// only run one test
+				break;
+			}
+		}
+		Assert.assertEquals("Number of retrieved data sets does not meet the expected value.", 1, retrievedDataSets);
+		
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java
new file mode 100755
index 0000000..47d3a3d
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/MetadataStoreTestBase.java
@@ -0,0 +1,305 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.metadata;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.DefaultMetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.metadata.models.RelationshipAnnotation;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.models.ClassificationAnnotation;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Connection;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnectionInfo;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.Database;
+
+public abstract class MetadataStoreTestBase {
+	private Logger logger = Logger.getLogger(MetadataStoreTestBase.class.getName());
+	private static final String analysisRun = UUID.randomUUID().toString();
+
+	protected abstract MetadataStore getMetadataStore();
+
+	public static WritableMetadataStore getWritableMetadataStore() {
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		if (!(mds instanceof WritableMetadataStore)) {
+			String errorText = "The MetadataStore implementation ''{0}'' does not support the WritableMetadataStore interface.";
+			Assert.fail(MessageFormat.format(errorText , mds.getClass()));
+			return null;
+		}
+		return (WritableMetadataStore) mds;
+	}
+
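+	// create a small object graph (database -> schema -> tables, folder -> files, plus sample annotations) used by the query and reference tests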
+	public static void createAdditionalTestData(WritableMetadataStore mds) {
+		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
+
+		JDBCConnection connection = new JDBCConnection();
+		connection.setName("connection1");
+
+		Table table1 = new Table();
+		table1.setName("table1");
+		Table table2 = new Table();
+		table2.setName("table2");
+
+		Schema schema1 = new Schema();
+		schema1.setName("schema1");
+		MetaDataObjectReference schemaRef = mds.createObject(schema1);
+		mds.addTableReference(schema1, mds.createObject(table1));
+		mds.addTableReference(schema1, mds.createObject(table2));
+
+		Database dataStore = new Database();
+		dataStore.setName("database1");
+		mds.createObject(dataStore);
+		mds.addSchemaReference(dataStore, schemaRef);
+		mds.addConnectionReference(dataStore, mds.createObject(connection));
+
+		DataFile file1 = new DataFile();
+		file1.setName("file1");
+		DataFile file2 = new DataFile();
+		file2.setName("file2");
+
+		DataFileFolder nestedFolder = new DataFileFolder();
+		nestedFolder.setName("nestedFolder");
+		MetaDataObjectReference nestedFolderRef = mds.createObject(nestedFolder);
+		mds.addDataFileReference(nestedFolder, mds.createObject(file1));
+		mds.addDataFileReference(nestedFolder, mds.createObject(file2));
+
+		DataFileFolder rootFolder = new DataFileFolder();
+		rootFolder.setName("rootFolder");
+		mds.createObject(rootFolder);
+		mds.addDataFileFolderReference(rootFolder, nestedFolderRef);
+
+		ProfilingAnnotation pa = new ProfilingAnnotation();
+		pa.setName("A profiling annotation");
+		pa.setProfiledObject(bankClientsShortRef);
+		pa.setAnalysisRun(analysisRun);
+		mds.createObject(pa);
+
+		ClassificationAnnotation ca = new ClassificationAnnotation();
+		ca.setName("A classification annotation");
+		ca.setClassifiedObject(bankClientsShortRef);
+		ca.setAnalysisRun(analysisRun);
+		ca.setClassifyingObjects(Collections.singletonList(bankClientsShortRef));
+		mds.createObject(ca);
+
+		RelationshipAnnotation ra = new RelationshipAnnotation();
+		ra.setName("A relationship annotation");
+		ra.setRelatedObjects(Collections.singletonList(bankClientsShortRef));
+		ra.setAnalysisRun(analysisRun);
+		mds.createObject(ra);
+
+		mds.commit();
+	}
+
+	@Before
+	public void createSampleData() {
+		WritableMetadataStore mds = getWritableMetadataStore();
+		mds.resetAllData();
+		mds.createSampleData();
+		createAdditionalTestData(mds);
+	}
+
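+	// run the given query and compare result object names with the expected names, either exactly or requiring the expected names to be contained in the results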
+	public static void checkQueryResults(MetadataStore mds, String[] expectedObjectNames, String searchTerm, boolean isSubset) {
+		HashSet<String> expectedResults = new HashSet<String>(Arrays.asList(expectedObjectNames));
+		List<MetaDataObjectReference> searchResult = mds.search(searchTerm);
+		Set<String> foundResults = new HashSet<>();
+		for (MetaDataObjectReference ref : searchResult) {
+			foundResults.add(mds.retrieve(ref).getName());
+		}
+		if (isSubset) {
+			String messageText = "Metadata search term ''{0}'' did not return expected subset of objects. Expected ''{1}'' but received ''{2}''.";
+			Assert.assertTrue(MessageFormat.format(messageText, new Object[] {searchTerm, expectedResults, foundResults}), foundResults.containsAll(expectedResults));
+		} else {
+			String messageText = "Metadata search term ''{0}'' did not return expected results. Expected ''{1}'' but received ''{2}''.";
+			Assert.assertTrue(MessageFormat.format(messageText, new Object[] {searchTerm, expectedResults, foundResults}), foundResults.equals(expectedResults));
+		}
+	}
+
+	public static void checkReferencedObjects(String[] expectedObjectNames, List<? extends MetaDataObject> referencedObjects, boolean isSubset) {
+		HashSet<String> expectedResults = new HashSet<String>(Arrays.asList(expectedObjectNames));
+		Set<String> actualNames = new HashSet<>();
+		for (MetaDataObject obj : referencedObjects) {
+			actualNames.add(obj.getName());
+		}
+		if (isSubset) {
+			String messageText = "Actual object names ''{0}'' are not a subset of expected names ''{1}''.";
+			Assert.assertTrue(MessageFormat.format(messageText, new Object[] { actualNames, expectedResults }), actualNames.containsAll(expectedResults));
+		} else {
+			String messageText = "Actual object names ''{0}'' do not match expected names ''{1}''.";
+			Assert.assertTrue(MessageFormat.format(messageText, new Object[] { actualNames, expectedResults }), actualNames.equals(expectedResults));
+		}
+	}
+
+	void checkFailingQuery(MetadataStore mds, String searchTerm) {
+		try {
+			logger.log(Level.INFO, "Checking incorrect query \"{0}\"", searchTerm);
+			List<MetaDataObjectReference> searchResult = mds.search(searchTerm);
+			if (searchResult != null) {
+				// Search must return null or throw exception
+				Assert.fail(MessageFormat.format("Incorrect query \"{0}\" did not throw the expected exception.", searchTerm));
+			}
+		} catch (MetadataStoreException e) {
+			logger.log(Level.INFO, "Caught expected exception.", e);
+		}
+	}
+
+	@Test
+	public void testSearchAndRetrieve() {
+		MetadataStore mds = getMetadataStore();
+		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
+		Assert.assertEquals("The metadata store did not retrieve the object with the expected name.", "BankClientsShort", mds.retrieve(bankClientsShortRef).getName());
+
+		// Test queries with conditions
+		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
+		checkQueryResults(mds, new String[] { "SimpleExampleTable", "file2", "file1"}, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.NOT_EQUALS, "BankClientsShort").build(), false);
+		checkQueryResults(mds, new String[] { "NAME" },
+				mds.newQueryBuilder().objectType("Column").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "NAME").simpleCondition("dataType", MetadataQueryBuilder.COMPARATOR.EQUALS, "string").build(), false);
+
+		// Test type hierarchy
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").build(), true);
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("RelationalDataSet").build(), true);
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable", "Simple URL example document", "Simple local example document", "table1", "table2", "file2", "file1" }, mds.newQueryBuilder().objectType("DataSet").build(), false);
+		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("MetaDataObject").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
+	}
+	
+	public static Database getDatabaseTestObject(MetadataStore mds) {
+		String dataStoreQuery = mds.newQueryBuilder().objectType("DataStore").build();
+		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "database1"}, dataStoreQuery, false);
+		return (Database) mds.retrieve(mds.search(dataStoreQuery).get(0));
+	}
+
+	public static Table getTableTestObject(MetadataStore mds) {
+		String tableQuery = mds.newQueryBuilder().objectType("Table").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "table1").build();
+		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "table1"}, tableQuery, false);
+		return (Table) mds.retrieve(mds.search(tableQuery).get(0));
+	}
+
+	public static DataFile getDataFileTestObject(MetadataStore mds) {
+		String dataFileQuery = mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "SimpleExampleTable").build();
+		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "SimpleExampleTable"}, dataFileQuery, false);
+		return (DataFile) mds.retrieve(mds.search(dataFileQuery).get(0));
+	}
+
+	public static DataFileFolder getDataFileFolderTestObject(MetadataStore mds) {
+		String folderQuery = mds.newQueryBuilder().objectType("DataFileFolder").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "rootFolder").build();
+		MetadataStoreTestBase.checkQueryResults(mds, new String[] { "rootFolder"}, folderQuery, false);
+		return (DataFileFolder) mds.retrieve(mds.search(folderQuery).get(0));
+	}
+
+	public static void checkReferences(MetadataStore mds, Database database) throws Exception {
+		List<Schema> schemaList = mds.getSchemas(database);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "schema1" }, schemaList, false);
+		List<Table> tableList = mds.getTables(schemaList.get(0));
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "table1", "table2" }, tableList, false);
+		List<Connection> connectionList = mds.getConnections(database);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "connection1" }, connectionList, false);
+	}
+
+	public static void checkReferences(MetadataStore mds, Table table) throws Exception {
+		JDBCConnectionInfo connectionInfo = (JDBCConnectionInfo) mds.getConnectionInfo(table);
+		Assert.assertTrue("Connection is not set in connection info.", connectionInfo.getConnections().size() > 0);
+		Assert.assertEquals("Connection does not match expected name.", "connection1", connectionInfo.getConnections().get(0).getName());
+		Assert.assertEquals("Schema name of connection info does not match expected value.", "schema1", connectionInfo.getSchemaName());
+	}
+
+	public static void checkReferences(MetadataStore mds, DataFileFolder folder) throws Exception {
+		List<DataFileFolder> nestedFolderList = mds.getDataFileFolders(folder);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "nestedFolder" }, nestedFolderList, false);
+		List<DataFile> fileList = mds.getDataFiles(nestedFolderList.get(0));
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "file1", "file2" }, fileList, false);
+	}
+
+	public static void checkReferences(MetadataStore mds, DataFile file) throws Exception {
+		List<Column> columnList = mds.getColumns(file);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "ColumnName1", "ColumnName2" }, columnList, false);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "SimpleExampleTable" }, Collections.singletonList(mds.getParent(columnList.get(0))), false);
+		MetadataStoreTestBase.checkReferencedObjects(new String[] { "ColumnName1", "ColumnName2" }, mds.getChildren(file), false);
+	}
+
+	@Test
+	public void testReferences() throws Exception {
+		MetadataStore mds = getMetadataStore();
+		checkReferences(mds, getDatabaseTestObject(mds));
+		checkReferences(mds, getTableTestObject(mds));
+		checkReferences(mds, getDataFileFolderTestObject(mds));
+		checkReferences(mds, getDataFileTestObject(mds));
+	}
+
+	@Test
+	public void testErrorHandling() {
+		MetadataStore mds = getMetadataStore();
+		MetaDataObjectReference nonExistentRef = new MetaDataObjectReference();
+		nonExistentRef.setId("non-existing-reference-id");
+		nonExistentRef.setRepositoryId(mds.getRepositoryId());
+
+		Assert.assertEquals("A null value was expected when retrieving a non-existend object.", null, mds.retrieve(nonExistentRef));
+		String errorText = "Metadata search should have returned an empty result set.";
+		Assert.assertEquals(errorText,  mds.search(mds.newQueryBuilder().objectType("nonExistentType").build()), new ArrayList<MetaDataObjectReference>());
+		Assert.assertEquals(errorText,  mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "nonExistentName").build()), new ArrayList<MetaDataObjectReference>());
+
+		if (!mds.getProperties().get(MetadataStore.STORE_PROPERTY_TYPE).equals("atlas")) {
+			// Skip this test because Atlas accepts this query as text search
+			checkFailingQuery(mds, "justAsSingleToken");
+			// Skip this test for Atlas because it does not return an error
+			String validQueryWithCondition = mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build();
+			checkFailingQuery(mds, validQueryWithCondition + DefaultMetadataQueryBuilder.SEPARATOR_STRING + "additionalTrailingToken");
+			String validDataSetQuery = mds.newQueryBuilder().objectType("DataFile").build();
+			checkFailingQuery(mds, validDataSetQuery + DefaultMetadataQueryBuilder.SEPARATOR_STRING + "additionalTrailingToken");
+		}
+	}
+
+	@Test
+	public void testAnnotations() {
+		MetadataStore mds = getMetadataStore();
+
+		String annotationQueryString = mds.newQueryBuilder().objectType("Annotation").build();
+		checkQueryResults(mds, new String[] { "A profiling annotation", "A classification annotation", "A relationship annotation" }, annotationQueryString, false);
+		String analysisRunQuery = mds.newQueryBuilder().objectType("Annotation").simpleCondition("analysisRun", MetadataQueryBuilder.COMPARATOR.EQUALS, analysisRun).build();
+		checkQueryResults(mds, new String[] { "A profiling annotation", "A classification annotation", "A relationship annotation" }, analysisRunQuery, false);
+	}
+
+	@Test
+	public void testResetAllData() {
+		MetadataStore mds = getMetadataStore();
+		mds.resetAllData();
+		String emptyResultSet = mds.newQueryBuilder().objectType("MetaDataObject").build();
+		checkQueryResults(mds, new String[] {}, emptyResultSet, false);
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java
new file mode 100755
index 0000000..5012ab3
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/WritableMetadataStoreTest.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.metadata;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+
+public class WritableMetadataStoreTest extends MetadataStoreTestBase {
+
+	protected MetadataStore getMetadataStore() {
+		return new ODFFactory().create().getMetadataStore();
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java
new file mode 100755
index 0000000..1f00a94
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/importer/JDBCMetadataImporterTest.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.metadata.importer;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.api.metadata.models.Schema;
+import org.apache.atlas.odf.api.metadata.models.Column;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.Table;
+
+public class JDBCMetadataImporterTest extends ODFTestBase {
+	static Logger logger = Logger.getLogger(JDBCMetadataImporterTest.class.getName());
+
+	static boolean testDBRan = false;
+	public static final String SOURCE_DB1 = "DBSAMPLE1";
+	public static final String SOURCE_DB2 = "DBSAMPLE2";
+	public static final String DATABASE1_NAME = SOURCE_DB1;
+	public static final String DATABASE2_NAME = SOURCE_DB2;
+	public static final String SCHEMA1_NAME = "APP1";
+	public static final String SCHEMA2_NAME = "APP2";
+	public static final String TABLE1_NAME = "EMPLOYEE" + System.currentTimeMillis();
+	public static final String TABLE2_NAME = "EMPLOYEE_SHORT" + System.currentTimeMillis();
+
+	@BeforeClass
+	public static void populateTestDB() throws Exception {
+		if (testDBRan) {
+			return;
+		}
+		createTestTables(SOURCE_DB1, SCHEMA1_NAME, TABLE1_NAME, TABLE2_NAME);
+		createTestTables(SOURCE_DB1, SCHEMA2_NAME, TABLE1_NAME, TABLE2_NAME);
+		// Switch table names so that the table named TABLE2_NAME has more columns in SOURCE_DB2 than it has in SOURCE_DB1
+		createTestTables(SOURCE_DB2, SCHEMA1_NAME, TABLE2_NAME, TABLE1_NAME);
+		testDBRan = true;
+	}
+
+	private static String getConnectionUrl(String dbName) {
+		String dbDir = "/tmp/odf-derby/" + dbName;
+		String connectionURL = "jdbc:derby:" + dbDir + ";create=true";
+		return connectionURL;
+	}
+
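+	/**
+	 * Creates two employee tables in the given Derby schema and inserts two
+	 * sample rows into each. The second table deliberately contains only a
+	 * subset of the first table's columns, which the import tests rely on.
+	 */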
+	private static void createTestTables(String dbName, String schemaName, String tableName1, String tableName2) throws Exception {
+		Connection conn = DriverManager.getConnection(getConnectionUrl(dbName));
+
+		String[] stats = new String[] {
+		"CREATE TABLE " + schemaName + "." + tableName1 + " (\r\n" + //
+				"		EMPNO CHAR(6) NOT NULL,\r\n" + //
+				"		FIRSTNME VARCHAR(12) NOT NULL,\r\n" + // 
+				"		MIDINIT CHAR(1),\r\n" + //
+				"		LASTNAME VARCHAR(15) NOT NULL,\r\n" + // 
+				"		WORKDEPT CHAR(3),\r\n" + //
+				"		PHONENO CHAR(4),\r\n" + //
+				"		HIREDATE DATE,\r\n" + //
+				"		JOB CHAR(8),\r\n" + //
+				"		EDLEVEL SMALLINT NOT NULL,\r\n" + // 
+				"		SEX CHAR(1),\r\n" + //
+				"		BIRTHDATE DATE,\r\n" + //
+				"		SALARY DECIMAL(9 , 2),\r\n" + // 
+				"		BONUS DECIMAL(9 , 2),\r\n" + //
+				"		COMM DECIMAL(9 , 2)\r\n" + //
+				"	)",			
+		"INSERT INTO " + schemaName + "." + tableName1 + " VALUES ('000010','CHRISTINE','I','HAAS','A00','3978','1995-01-01','PRES    ',18,'F','1963-08-24',152750.00,1000.00,4220.00)",
+		"INSERT INTO " + schemaName + "." + tableName1 + " VALUES ('000020','MICHAEL','L','THOMPSON','B01','3476','2003-10-10','MANAGER ',18,'M','1978-02-02',94250.00,800.00,3300.00)",
+		// Note that the 2nd table has a subset of the columns of the first table
+		"CREATE TABLE " + schemaName + "." + tableName2 + " (\r\n" + //
+				"		EMPNO CHAR(6) NOT NULL,\r\n" + //
+				"		FIRSTNME VARCHAR(12) NOT NULL,\r\n" + //
+				"		MIDINIT CHAR(1),\r\n" + //
+				"		LASTNAME VARCHAR(15) NOT NULL\r\n" + //
+				"	)",
+		"INSERT INTO " + schemaName + "." + tableName2 + " VALUES ('000010','CHRISTINE','I','HAAS')",
+		"INSERT INTO " + schemaName + "." + tableName2 + " VALUES ('000020','MICHAEL','L','THOMPSON')"
+		};
+
+		for (String stat : stats) {
+			boolean result = conn.createStatement().execute(stat);
+			logger.info("Result of statement: " + result);
+		}
+		// Close the connection to avoid leaking Derby resources across tests
+		conn.close();
+	}
+
+	private static void runTestImport(MetadataStore mds, String connectionDbName, String importDbName, String schemaName, String tableName) throws Exception {
+		populateTestDB();
+		JDBCMetadataImporter importer = new ODFInternalFactory().create(JDBCMetadataImporter.class);
+		JDBCConnection conn = new JDBCConnection();
+		conn.setJdbcConnectionString(getConnectionUrl(connectionDbName));
+		conn.setUser("dummyUser");
+		conn.setPassword("dummyPassword");
+		JDBCMetadataImportResult importResult = importer.importTables(conn, importDbName, schemaName, tableName);
+		Assert.assertTrue("JDBCMetadataImportResult does not refer to imported database.", importResult.getDatabaseName().equals(importDbName));
+		Assert.assertTrue("JDBCMetadataImportResult does not refer to imported table.", importResult.getTableNames().contains(schemaName + "." + tableName));
+	}
+
+	public static void runTestImport(MetadataStore mds) throws Exception {
+		runTestImport(mds, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE1_NAME);
+	}
+
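+	/*
+	 * Imports tables step by step and validates after each step that exactly
+	 * the expected databases, schemas, tables, and columns are present, i.e.
+	 * that repeated imports merge into existing objects instead of creating
+	 * duplicates.
+	 */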
+	@Test
+	public void testSimpleImport() throws Exception {
+		MetadataStore ams = new ODFFactory().create().getMetadataStore();
+		ams.resetAllData();
+
+		List<String> expectedDatabases = new ArrayList<String>();
+		HashMap<String, List<String>> expectedSchemasForDatabase = new HashMap<String, List<String>>();
+		HashMap<String, List<String>> expectedTablesForSchema = new HashMap<String, List<String>>();
+		HashMap<String, List<String>> expectedColumnsForTable = new HashMap<String, List<String>>();
+
+		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE1_NAME);
+
+		expectedDatabases.add(DATABASE1_NAME);
+		expectedSchemasForDatabase.put(DATABASE1_NAME, new ArrayList<String>());
+		expectedSchemasForDatabase.get(DATABASE1_NAME).add(SCHEMA1_NAME);
+		expectedTablesForSchema.put(SCHEMA1_NAME, new ArrayList<String>());
+		expectedTablesForSchema.get(SCHEMA1_NAME).add(TABLE1_NAME);
+		expectedColumnsForTable.put(TABLE1_NAME, new ArrayList<String>());
+		expectedColumnsForTable.get(TABLE1_NAME).addAll(Arrays.asList(new String[] { "EMPNO", "FIRSTNME", "MIDINIT", "LASTNAME",
+				"WORKDEPT", "PHONENO", "HIREDATE", "JOB", "EDLEVEL", "SEX", "BIRTHDATE", "SALARY", "BONUS", "COMM" }));
+		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
+
+		// Add another table to an existing schema in an existing database
+		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA1_NAME, TABLE2_NAME);
+
+		expectedTablesForSchema.get(SCHEMA1_NAME).add(TABLE2_NAME);
+		expectedColumnsForTable.put(TABLE2_NAME, new ArrayList<String>());
+		expectedColumnsForTable.get(TABLE2_NAME).addAll(Arrays.asList(new String[] { "EMPNO", "FIRSTNME", "MIDINIT", "LASTNAME" }));
+		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
+
+		// Add another schema and table to an existing database
+		runTestImport(ams, SOURCE_DB1, DATABASE1_NAME, SCHEMA2_NAME, TABLE1_NAME);
+
+		expectedSchemasForDatabase.get(DATABASE1_NAME).add(SCHEMA2_NAME);
+		expectedTablesForSchema.put(SCHEMA2_NAME, new ArrayList<String>());
+		expectedTablesForSchema.get(SCHEMA2_NAME).add(TABLE1_NAME);
+		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
+
+		// Import TABLE2_NAME again from SOURCE_DB2 where it has more columns than in SOURCE_DB1
+		runTestImport(ams, SOURCE_DB2, DATABASE1_NAME, SCHEMA1_NAME, TABLE2_NAME);
+
+		// Validate that additional columns have been added to the existing table object TABLE2_NAME.
+		expectedColumnsForTable.get(TABLE2_NAME).addAll(Arrays.asList(new String[] { "WORKDEPT", "PHONENO", "HIREDATE", "JOB", "EDLEVEL", "SEX", "BIRTHDATE", "SALARY", "BONUS", "COMM" }));
+		validateImportedObjects(ams, expectedDatabases, expectedSchemasForDatabase, expectedTablesForSchema, expectedColumnsForTable);
+	}
+
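+	/**
+	 * Verifies the imported object hierarchy: each expected database must
+	 * exist exactly once with one matching JDBC connection, and the actual
+	 * schema, table, and column names must mutually contain the expected
+	 * names, i.e. match exactly as sets.
+	 */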
+	private void validateImportedObjects(MetadataStore mds, List<String> expectedDatabases, HashMap<String, List<String>> expectedSchemasForDatabase,
+			HashMap<String, List<String>> expectedTablesForSchema, HashMap<String, List<String>> expectedColumnsForTable) throws Exception {
+		for (String dbName : expectedDatabases) {
+			String query = mds.newQueryBuilder().objectType("Database").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, dbName).build();
+			List<MetaDataObjectReference> dbs = mds.search(query);
+			Assert.assertEquals("Number of databases does not match expected value.", 1, dbs.size());
+			Database database = (Database) mds.retrieve(dbs.get(0));
+			logger.log(Level.INFO, MessageFormat.format("Reference ''{0}''.", JSONUtils.toJSON(database)));
+			int numberOfMatchingConnections = 0;
+			for (org.apache.atlas.odf.api.metadata.models.Connection con : mds.getConnections(database)) {
+				if (getConnectionUrl(database.getName()).equals(((JDBCConnection) mds.retrieve(con.getReference())).getJdbcConnectionString())) {
+					numberOfMatchingConnections++;
+				}
+			}
+			Assert.assertEquals("Number of matching JDBC connections does not match expected value.", 1, numberOfMatchingConnections);
+			List<String> actualSchemaNames = new ArrayList<String>();
+			for (Schema schema : mds.getSchemas(database)) {
+				actualSchemaNames.add(schema.getName());
+
+				List<String> actualTableNames = new ArrayList<String>();
+				for (Table table : mds.getTables(schema)) {
+					actualTableNames.add(table.getName());
+
+					List<String> actualColumnNames = new ArrayList<String>();
+					for (Column column : mds.getColumns(table)) {
+						actualColumnNames.add(column.getName());
+					}
+					Assert.assertTrue("Expected columns are missing from metadata store.", actualColumnNames.containsAll(expectedColumnsForTable.get(table.getName())));
+					Assert.assertTrue("Importer has not imported all expected columns.", expectedColumnsForTable.get(table.getName()).containsAll(actualColumnNames));
+				}
+				Assert.assertTrue("Expected tables are missing from metadata store.", actualTableNames.containsAll(expectedTablesForSchema.get(schema.getName())));
+				Assert.assertTrue("Importer has not imported all expected tables.", expectedTablesForSchema.get(schema.getName()).containsAll(actualTableNames));
+			}
+			Assert.assertTrue("Expected schemas are missing from metadata store.", actualSchemaNames.containsAll(expectedSchemasForDatabase.get(database.getName())));
+			Assert.assertTrue("Importer has not imported all expected schemas.", expectedSchemasForDatabase.get(database.getName()).containsAll(actualSchemaNames));
+		}
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java
new file mode 100755
index 0000000..ec0aa9a
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/internal/spark/SparkDiscoveryServiceLocalTest.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.metadata.internal.spark;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
+import org.apache.atlas.odf.api.settings.SparkConfig;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+
+public class SparkDiscoveryServiceLocalTest extends ODFTestBase {
+	protected static Logger logger = Logger.getLogger(SparkDiscoveryServiceLocalTest.class.getName());
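+	// Polling budget: up to 400 polls with 2 seconds in between, i.e. a bit over 13 minutes per request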
+	public static int WAIT_MS_BETWEEN_POLLING = 2000;
+	public static int MAX_NUMBER_OF_POLLS = 400;
+	public static String DISCOVERY_SERVICE_ID = "spark-summary-statistics-example-service";
+	public static String DASHDB_DB = "BLUDB";
+	public static String DASHDB_SCHEMA = "SAMPLES";
+	public static String DASHDB_TABLE = "CUST_RETENTION_LIFE_DURATION";
+	public static enum DATASET_TYPE {
+		FILE, TABLE
+	}
+
+	@BeforeClass
+	public static void createSampleData() throws Exception {
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		if (mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).size() == 0) {
+			mds.createSampleData();
+		}
+	}
+
+	public static SparkConfig getLocalSparkConfig() {
+		SparkConfig config = new SparkConfig();
+		config.setClusterMasterUrl("local");
+		return config;
+	}
+
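+	/*
+	 * The two service definitions below are identical except for the
+	 * implementing class and the input method: one consumes a Spark DataFrame
+	 * directly, the other uses the generic discovery service interface.
+	 */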
+	public static DiscoveryServiceProperties getSparkSummaryStatisticsService() throws JSONException {
+		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
+		dsProperties.setId(DISCOVERY_SERVICE_ID);
+		dsProperties.setName("Spark summary statistics service");
+		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
+		dsProperties.setCustomDescription("");
+		dsProperties.setIconUrl("spark.png");
+		dsProperties.setLink("http://www.spark.apache.org");
+		dsProperties.setPrerequisiteAnnotationTypes(null);
+		dsProperties.setResultingAnnotationTypes(null);
+		dsProperties.setSupportedObjectTypes(null);
+		dsProperties.setAssignedObjectTypes(null);
+		dsProperties.setAssignedObjectCandidates(null);
+		dsProperties.setParallelismCount(2);
+		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
+		endpoint.setJar("META-INF/spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
+		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
+		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
+		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
+		return dsProperties;
+	}
+
+	public static DiscoveryServiceProperties getSparkDiscoveryServiceExample() throws JSONException {
+		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
+		dsProperties.setId(DISCOVERY_SERVICE_ID);
+		dsProperties.setName("Spark summary statistics service");
+		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
+		dsProperties.setCustomDescription("");
+		dsProperties.setIconUrl("spark.png");
+		dsProperties.setLink("http://www.spark.apache.org");
+		dsProperties.setPrerequisiteAnnotationTypes(null);
+		dsProperties.setResultingAnnotationTypes(null);
+		dsProperties.setSupportedObjectTypes(null);
+		dsProperties.setAssignedObjectTypes(null);
+		dsProperties.setAssignedObjectCandidates(null);
+		dsProperties.setParallelismCount(2);
+		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
+		endpoint.setJar("META-INF/spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
+		endpoint.setClassName("org.apache.atlas.odf.core.spark.SparkDiscoveryServiceExample");
+		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.Generic);
+		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
+		return dsProperties;
+	}
+
+	public static DataFile getTestDataFile(MetadataStore mds) {
+		DataFile dataSet = null;
+		List<MetaDataObjectReference> refs = mds.search(mds.newQueryBuilder().objectType("DataFile").build());
+		for (MetaDataObjectReference ref : refs) {
+			DataFile file = (DataFile) mds.retrieve(ref);
+			if (file.getName().equals("BankClientsShort")) {
+				dataSet = file;
+				break;
+			}
+		}
+		Assert.assertNotNull(dataSet);
+		logger.log(Level.INFO, "Testing Spark discovery service on metadata object {0} (ref: {1})", new Object[] { dataSet.getName(), dataSet.getReference() });
+		return dataSet;
+	}
+
+	public static Table getTestTable(MetadataStore mds) {
+		Table dataSet = null;
+		List<MetaDataObjectReference> refs = mds.search(mds.newQueryBuilder().objectType("Table").build());
+		for (MetaDataObjectReference ref : refs) {
+			Table table = (Table) mds.retrieve(ref);
+			if (table.getName().equals(DASHDB_TABLE)) {
+				dataSet = table;
+				break;
+			}
+		}
+		Assert.assertNotNull(dataSet);
+		logger.log(Level.INFO, "Testing Spark discovery service on metadata object {0} (ref: {1})", new Object[] { dataSet.getName(), dataSet.getReference() });
+		return dataSet;
+	}
+
+	public static AnalysisRequest getSparkAnalysisRequest(DataSet dataSet) {
+		AnalysisRequest request = new AnalysisRequest();
+		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
+		dataSetRefs.add(dataSet.getReference());
+		request.setDataSets(dataSetRefs);
+		List<String> serviceIds = Arrays.asList(new String[]{DISCOVERY_SERVICE_ID});
+		request.setDiscoveryServiceSequence(serviceIds);
+		return request;
+	}
+
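+	/**
+	 * End-to-end test driver: registers the given discovery service, runs an
+	 * analysis request on a sample data set, polls until the request is no
+	 * longer queued or running, and asserts that it finished and produced at
+	 * least one annotation. Note that the annotationNames parameter is
+	 * currently not evaluated.
+	 */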
+	public void runSparkServiceTest(SparkConfig sparkConfig, DATASET_TYPE dataSetType, DiscoveryServiceProperties regInfo, String[] annotationNames) throws Exception {
+		logger.info("Using Spark configuration: " + JSONUtils.toJSON(sparkConfig));
+		SettingsManager config = new ODFFactory().create().getSettingsManager();
+		ODFSettings settings = config.getODFSettings();
+		settings.setSparkConfig(sparkConfig);
+		config.updateODFSettings(settings);
+
+		logger.info("Using discovery service: " + JSONUtils.toJSON(regInfo));
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+
+		try {
+			discoveryServicesManager.deleteDiscoveryService(DISCOVERY_SERVICE_ID);
+		} catch(ServiceNotFoundException e) {
+			// Ignore exception because service may not exist
+		}
+		discoveryServicesManager.createDiscoveryService(regInfo);
+
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		Assert.assertNotNull(mds);
+		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+		Assert.assertNotNull(as);
+
+		RelationalDataSet dataSet = null;
+		if (dataSetType == DATASET_TYPE.FILE) {
+			dataSet = getTestDataFile(mds);
+		} else if (dataSetType == DATASET_TYPE.TABLE) {
+			dataSet = getTestTable(mds);
+		} else {
+			Assert.fail();
+		}
+
+		logger.info("Using dataset: " + JSONUtils.toJSON(dataSet));
+
+		AnalysisRequest request = getSparkAnalysisRequest(dataSet);
+		logger.info("Using analysis request: " + JSONUtils.toJSON(request));
+
+		logger.info("Starting analysis...");
+		AnalysisResponse response = analysisManager.runAnalysis(request);
+		Assert.assertNotNull(response);
+		String requestId = response.getId();
+		Assert.assertNotNull(requestId);
+		logger.info("Request id is " + requestId + ".");
+
+		logger.info("Waiting for request to finish");
+		AnalysisRequestStatus status = null;
+		int maxPolls = MAX_NUMBER_OF_POLLS;
+		do {
+			status = analysisManager.getAnalysisRequestStatus(requestId);
+			logger.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
+			maxPolls--;
+			try {
+				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
+			} catch (InterruptedException e) {
+				logger.log(Level.INFO, "Exception thrown: ", e);
+			}
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+		if (maxPolls == 0) {
+			logger.log(Level.INFO, "Request ''{0}'' did not finish within the maximum number of polls; giving up waiting.", requestId);
+		}
+		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, status.getState());
+
+		List<Annotation> annots = as.getAnnotations(null, status.getRequest().getId());
+		logger.info("Number of annotations created: " + annots.size());
+		Assert.assertTrue("No annotations have been created.", annots.size() > 0);
+
+		logger.log(Level.INFO, "Request ''{0}'' is finished.", requestId);
+
+		discoveryServicesManager.deleteDiscoveryService(DISCOVERY_SERVICE_ID);
+	}
+
+	@Test
+	public void testLocalSparkClusterWithLocalDataFile() throws Exception {
+		runSparkServiceTest(getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkSummaryStatisticsService(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
+	}
+
+	@Test
+	public void testLocalSparkClusterWithLocalDataFileAndDiscoveryServiceRequest() throws Exception {
+		runSparkServiceTest(getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkDiscoveryServiceExample(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java
new file mode 100755
index 0000000..4168b0e
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/integrationtest/metadata/models/CachedMetadataStoreTest.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.integrationtest.metadata.models;
+
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.integrationtest.metadata.MetadataStoreTestBase;
+import org.apache.atlas.odf.core.metadata.WritableMetadataStore;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.DataFileFolder;
+import org.apache.atlas.odf.api.metadata.models.Database;
+import org.apache.atlas.odf.api.metadata.models.Table;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+
+public class CachedMetadataStoreTest extends TimerTestBase {
+	protected static Logger logger = ODFTestLogger.get();
+
+	@Test
+	public void testMetaDataCache() throws Exception {
+		// Note that only a subset of the metadata store test cases are used here because the MetaDataCache does not support queries
+		WritableMetadataStore mds = MetadataStoreTestBase.getWritableMetadataStore();
+		mds.resetAllData();
+		mds.createSampleData();
+		MetadataStoreTestBase.createAdditionalTestData(mds);
+	
+		Database database = MetadataStoreTestBase.getDatabaseTestObject(mds);
+		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, database)), database);
+
+		Table table = MetadataStoreTestBase.getTableTestObject(mds);
+		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, table)), table); 
+
+		DataFileFolder folder = MetadataStoreTestBase.getDataFileFolderTestObject(mds);
+		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, folder)), folder);
+
+		DataFile file = MetadataStoreTestBase.getDataFileTestObject(mds);
+		MetadataStoreTestBase.checkReferences(new CachedMetadataStore(CachedMetadataStore.retrieveMetaDataCache(mds, file)), file);
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java
new file mode 100755
index 0000000..75d41c5
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFInternalFactoryTest.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import static org.junit.Assert.assertNotNull;
+
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.notification.NotificationManager;
+
+public class ODFInternalFactoryTest extends TimerTestBase {
+
+	Logger logger = ODFTestLogger.get();
+
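+	/**
+	 * Verifies that the internal factory can create an implementation for
+	 * each of the core ODF interfaces listed below.
+	 */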
+	@Test
+	public void testFactoryInstantiations() throws Exception {
+		try {
+			ODFInternalFactory factory = new ODFInternalFactory();
+			Class<?>[] interfaces = new Class<?>[] { //
+			DiscoveryServiceQueueManager.class, //
+					ControlCenter.class, //
+					AnalysisRequestTrackerStore.class, //
+					ThreadManager.class, //
+					ExecutorServiceFactory.class, //
+					NotificationManager.class, //
+					DiscoveryServiceQueueManager.class, //
+			};
+			for (Class<?> cl : interfaces) {
+				Object o = factory.create(cl);
+				assertNotNull(o);
+				logger.info("Object created for class " + cl.getName() + ": " + o.getClass().getName());
+			}
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw e;
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java
new file mode 100755
index 0000000..867f0a9
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestBase.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.engine.SystemHealth;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.engine.EngineManager;
+
+/**
+ * All JUnit test cases that require a proper Kafka setup should inherit from this class.
+ */
+public class ODFTestBase extends TimerTestBase {
+
+	protected static Logger log = ODFTestLogger.get();
+
+	@Test
+	public void testHealth() {
+		testHealth(true);
+	}
+
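+	/**
+	 * Checks the overall engine health: with Kafka running the status is
+	 * expected to be OK, otherwise ERROR. Used as a guard before and after
+	 * each test.
+	 */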
+	private void testHealth(boolean kafkaRunning) {
+		log.info("Starting health check...");
+		EngineManager engineManager = new ODFFactory().create().getEngineManager();
+		SystemHealth health = engineManager.checkHealthStatus();
+		if (!kafkaRunning) {
+			Assert.assertEquals(SystemHealth.HealthStatus.ERROR, health.getStatus());
+		} else {
+			Assert.assertEquals(SystemHealth.HealthStatus.OK, health.getStatus());
+		}
+		log.info("Health check finished");
+	}
+
+	@BeforeClass
+	public static void startup() throws Exception {
+		TestEnvironment.startAll();
+	}
+
+	@Before
+	public void setup() throws Exception {
+		testHealth(true);
+	}
+
+	@After
+	public void tearDown() throws Exception {
+		testHealth(true);
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java
new file mode 100755
index 0000000..a845157
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestLogger.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import java.util.logging.Logger;
+
+public class ODFTestLogger {
+	
+	public static Logger get() {
+		return Logger.getLogger(ODFTestLogger.class.getName());
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java
new file mode 100755
index 0000000..525dc83
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/ODFTestcase.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import org.junit.BeforeClass;
+
+import org.apache.atlas.odf.api.ODFFactory;
+
+public class ODFTestcase extends TimerTestBase {
+	@BeforeClass
+	public static void setupBeforeClass() {
+		TestEnvironment.startAll();
+		// Initialize analysis manager
+		new ODFFactory().create().getAnalysisManager();
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java
new file mode 100755
index 0000000..06d407e
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironment.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInitializer;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+
+/**
+ * This class can be used to start the components required for testing.
+ */
+public class TestEnvironment {
+
+	static Logger logger = Logger.getLogger(TestEnvironment.class.getName());
+
+	public static String MESSAGING_CLASS = "org.apache.atlas.odf.core.test.messaging.kafka.TestEnvironmentMessagingInitializer";
+
+	@SuppressWarnings("unchecked")
+	public static <T> T createObject(String className, Class<T> clazz) {
+		ClassLoader cl = TestEnvironment.class.getClassLoader();
+		try {
+			Class<?> tei = cl.loadClass(className);
+			return (T) tei.newInstance();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred when instantiating test environment class " + className, exc);
+		}
+		return null;
+	}
+
+	public static void start(String className) {
+		TestEnvironmentInitializer initializer = createObject(className, TestEnvironmentInitializer.class);
+		if (initializer != null) {
+			initializer.start();
+		}
+	}
+
+	public static void startMessaging() {
+		if ("true".equals(new ODFInternalFactory().create(Environment.class).getProperty("odf.dont.start.messaging"))) {
+			// do nothing
+			logger.info("Messaging test environment not started because environment variable odf.dont.start.messaging is set");
+		} else {
+			start(MESSAGING_CLASS);
+		}
+	}
+
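+	/**
+	 * Starts everything required for the tests: the messaging layer first
+	 * (unless suppressed via the odf.dont.start.messaging property), then the
+	 * ODF core.
+	 */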
+	public static void startAll() {
+		startMessaging();
+		ODFInitializer.start();
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java
new file mode 100755
index 0000000..b4a0022
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TestEnvironmentInitializer.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+public interface TestEnvironmentInitializer {
+	void start();
+	
+	void stop();
+	
+	String getName();
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java
new file mode 100755
index 0000000..68740e4
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/TimerTestBase.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import org.apache.wink.json4j.JSONException;
+import org.junit.AfterClass;
+import org.junit.Rule;
+import org.junit.rules.Stopwatch;
+import org.junit.runner.Description;
+
+import com.google.common.io.Files;
+
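+/**
+ * Base class that records the execution time of each test method via a JUnit
+ * Stopwatch rule and appends the results as CSV rows of the form
+ * "method,millis,class,project" to /tmp/odf-test-execution-log.csv,
+ * de-duplicating rows that already exist in the file.
+ */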
+public class TimerTestBase {
+	static final String logFilePath = "/tmp/odf-test-execution-log.csv";
+	static Map<String, HashMap<String, Long>> testTimeMap = new HashMap<String, HashMap<String, Long>>();
+	final static Logger logger = ODFTestLogger.get();
+
+	@Rule
+	public Stopwatch timeWatcher = new Stopwatch() {
+		@Override
+		protected void finished(long nanos, Description description) {
+			HashMap<String, Long> testMap = testTimeMap.get(description.getClassName());
+			if (testMap == null) {
+				testMap = new HashMap<String, Long>();
+				testTimeMap.put(description.getClassName(), testMap);
+			}
+			testMap.put(description.getMethodName(), (nanos / 1000 / 1000));
+		}
+	};
+
+	@AfterClass
+	public static void tearDownAndLogTimes() throws JSONException {
+		try {
+			File logFile = new File(logFilePath);
+			Set<String> uniqueRows = new HashSet<String>();
+			if (logFile.exists()) {
+				uniqueRows = new HashSet<String>(Files.readLines(logFile, StandardCharsets.UTF_8));
+			}
+
+			for (Entry<String, HashMap<String, Long>> entry : testTimeMap.entrySet()) {
+				for (Entry<String, Long> testEntry : entry.getValue().entrySet()) {
+					String logRow = new StringBuilder().append(testEntry.getKey()).append(",").append(testEntry.getValue()).append(",").append(entry.getKey()).append(",")
+							.append(System.getProperty("odf.build.project.name", "ProjectNameNotDefined")).toString();
+					uniqueRows.add(logRow);
+				}
+			}
+
+			StringBuilder logContent = new StringBuilder();
+			Iterator<String> rowIterator = uniqueRows.iterator();
+			while (rowIterator.hasNext()) {
+				logContent.append(rowIterator.next());
+				if (rowIterator.hasNext()) {
+					logContent.append("\n");
+				}
+			}
+
+			logger.info("Total time consumed by succeeded tests:\n" + logContent.toString());
+			logFile.createNewFile();
+			Files.write(logContent.toString().getBytes(StandardCharsets.UTF_8), logFile);
+		} catch (IOException e) {
+			logger.warning("Error writing test execution log");
+			e.printStackTrace();
+		}
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java
new file mode 100755
index 0000000..7a1f0ed
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationExtensionTest.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.annotation;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.fasterxml.jackson.core.Version;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+import org.apache.atlas.odf.json.AnnotationDeserializer;
+import org.apache.atlas.odf.json.AnnotationSerializer;
+
+public class AnnotationExtensionTest extends TimerTestBase {
+
+	static Logger logger = ODFTestLogger.get();
+
+	public static <T> T readJSONObjectFromFileInClasspath(ObjectMapper om, Class<T> cl, String pathToFile, ClassLoader classLoader) {
+		if (classLoader == null) {
+			// use current classloader if not provided
+			classLoader = AnnotationExtensionTest.class.getClassLoader();
+		}
+		// Use try-with-resources so that the input stream is always closed
+		try (InputStream is = classLoader.getResourceAsStream(pathToFile)) {
+			return om.readValue(is, cl);
+		} catch (IOException e) {
+			// Assume that this is a severe error since the provided JSONs should be correct
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Test
+	public void testWithUtils() throws Exception {
+		testSimple(JSONUtils.getGlobalObjectMapper());
+	}
+
+	@Test
+	public void testWithSeparateObjectMapper() throws Exception {
+		ObjectMapper om = new ObjectMapper();
+		SimpleModule mod = new SimpleModule("annotation module", Version.unknownVersion());
+		mod.addDeserializer(Annotation.class, new AnnotationDeserializer());
+		mod.addSerializer(Annotation.class, new AnnotationSerializer());
+		om.registerModule(mod);
+		testSimple(om);
+	}
+
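+	/**
+	 * Round-trip check: serializes an extended annotation, deserializes it
+	 * with Annotation.class as the target type, and verifies that the custom
+	 * deserializer restores the concrete ExtensionTestAnnotation type and its
+	 * properties. An unknown annotation type is expected to fall back to
+	 * ProfilingAnnotation with the extra properties kept in jsonProperties.
+	 */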
+	private void testSimple(ObjectMapper om) throws Exception {
+		ExtensionTestAnnotation newTestAnnot = new ExtensionTestAnnotation();
+		String strValue = "newstring1";
+		int intValue = 4237;
+		newTestAnnot.setNewStringProp1(strValue);
+		newTestAnnot.setNewIntProp2(intValue);
+		String newTestAnnotJSON = JSONUtils.toJSON(newTestAnnot).toString();
+		logger.info("New test annot JSON: " + newTestAnnotJSON);
+
+		logger.info("Deserializing with " + Annotation.class.getSimpleName() + "class as target class");
+		Annotation annot1 = om.readValue(newTestAnnotJSON, Annotation.class);
+		Assert.assertNotNull(annot1);
+		logger.info("Deserialized annotation JSON (target: " + Annotation.class.getSimpleName() + "): " + om.writeValueAsString(annot1));
+		logger.info("Deserialized annotation class (target: " + Annotation.class.getSimpleName() + "): " + annot1.getClass().getName());
+		Assert.assertEquals(ExtensionTestAnnotation.class, annot1.getClass());
+		ExtensionTestAnnotation extAnnot1 = (ExtensionTestAnnotation) annot1;
+		Assert.assertEquals(strValue, extAnnot1.getNewStringProp1());
+		Assert.assertEquals(intValue, extAnnot1.getNewIntProp2());
+
+		/* This does not make sense because you would never use ExtensionTestAnnotation.class as the deserialization target:
+		 * that would enforce usage of the standard bean deserializer (no custom deserializer is registered for this specific class), so the jsonProperties cannot be mapped.
+		logger.info("Calling deserialization with " + ExtensionTestAnnotation.class.getSimpleName() + " as target");
+		ExtensionTestAnnotation annot2 = om.readValue(newTestAnnotJSON, ExtensionTestAnnotation.class);
+		Assert.assertNotNull(annot2);
+		logger.info("Deserialized annotation JSON (target: " + ExtensionTestAnnotation.class.getSimpleName() + "): " + om.writeValueAsString(annot2));
+		logger.info("Deserialized annotation class (target: " + ExtensionTestAnnotation.class.getSimpleName() + "): " + annot2.getClass().getName());
+		Assert.assertEquals(ExtensionTestAnnotation.class, annot2.getClass());
+		String s = annot2.getNewStringProp1();
+		Assert.assertEquals(strValue, annot2.getNewStringProp1());
+		Assert.assertEquals(intValue, annot2.getNewIntProp2()); */
+
+		logger.info("Processing profiling annotation...");
+		Annotation unknownAnnot = readJSONObjectFromFileInClasspath(om, Annotation.class, "org/apache/atlas/odf/core/test/annotation/annotexttest1.json", null);
+		Assert.assertNotNull(unknownAnnot);
+		logger.info("Read Unknown annotation: " + unknownAnnot.getClass().getName());
+		Assert.assertEquals(ProfilingAnnotation.class, unknownAnnot.getClass());
+
+		logger.info("Read profiling annotation: " + om.writeValueAsString(unknownAnnot));
+		JSONObject jsonPropertiesObj = new JSONObject(unknownAnnot.getJsonProperties());
+		Assert.assertEquals("newProp1Value", jsonPropertiesObj.get("newProp1"));
+		Assert.assertEquals((Integer) 4237, jsonPropertiesObj.get("newProp2"));
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java
new file mode 100755
index 0000000..b65ce17
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/AnnotationStoreTest.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.annotation;
+
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+
+public class AnnotationStoreTest extends ODFTestcase {
+
+	private AnnotationStore createAnnotationStore() {
+		return new DefaultStatusQueueStore();
+	}
+	
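+	/**
+	 * Stores a ProfilingAnnotation for a synthetic object reference and
+	 * verifies that it can be retrieved via that reference with type,
+	 * profiled object id, and annotation reference preserved.
+	 */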
+	@Test
+	public void testStoreProfilingAnnotation() throws Exception {
+		AnnotationStore as = createAnnotationStore();
+		
+		String modRef1Id = UUID.randomUUID().toString();
+		MetaDataObjectReference mdoref1 = new MetaDataObjectReference();
+		mdoref1.setId(modRef1Id);
+		
+		ProfilingAnnotation annot1 = new ProfilingAnnotation();
+		annot1.setJsonProperties("{\"a\": \"b\"}");
+		annot1.setAnnotationType("AnnotType1");
+		annot1.setProfiledObject(mdoref1);
+
+		MetaDataObjectReference annot1Ref = as.store(annot1);
+		Assert.assertNotNull(annot1Ref.getId());
+		List<Annotation> retrievedAnnots = as.getAnnotations(mdoref1, null);
+		Assert.assertEquals(1, retrievedAnnots.size());
+		
+		Annotation retrievedAnnot = retrievedAnnots.get(0);
+		Assert.assertTrue(annot1 != retrievedAnnot);
+		Assert.assertTrue(retrievedAnnot instanceof ProfilingAnnotation);
+		ProfilingAnnotation retrievedProfilingAnnotation = (ProfilingAnnotation) retrievedAnnot;
+		Assert.assertEquals(modRef1Id, retrievedProfilingAnnotation.getProfiledObject().getId());
+		Assert.assertEquals(annot1Ref, retrievedAnnot.getReference());
+		
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java
new file mode 100755
index 0000000..cd8f695
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/ExtensionTestAnnotation.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.annotation;
+
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+
+class ExtensionTestAnnotation extends ProfilingAnnotation {
+
+	private String newStringProp1;
+	private int newIntProp2;
+
+	public String getNewStringProp1() {
+		return newStringProp1;
+	}
+
+	public void setNewStringProp1(String newStringProp1) {
+		this.newStringProp1 = newStringProp1;
+	}
+
+	public int getNewIntProp2() {
+		return newIntProp2;
+	}
+
+	public void setNewIntProp2(int newIntProp2) {
+		this.newIntProp2 = newIntProp2;
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java
new file mode 100755
index 0000000..f65e3ad
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingExtendedAnnotations.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.annotation;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class TestSyncDiscoveryServiceWritingExtendedAnnotations extends DiscoveryServiceBase implements SyncDiscoveryService {
+	Logger logger = ODFTestLogger.get();
+
+	public static class SyncDiscoveryServiceAnnotation extends ProfilingAnnotation {
+		private String prop1 = "";
+		private int prop2 = 4237;
+		private MyObject prop3 = new MyObject();
+
+		public String getProp1() {
+			return prop1;
+		}
+
+		public void setProp1(String prop1) {
+			this.prop1 = prop1;
+		}
+
+		public int getProp2() {
+			return prop2;
+		}
+
+		public void setProp2(int prop2) {
+			this.prop2 = prop2;
+		}
+
+		public MyObject getProp3() {
+			return prop3;
+		}
+
+		public void setProp3(MyObject prop3) {
+			this.prop3 = prop3;
+		}
+
+	}
+
+	public static class MyObject {
+		private String anotherProp = "";
+
+		public String getAnotherProp() {
+			return anotherProp;
+		}
+
+		public void setAnotherProp(String anotherProp) {
+			this.anotherProp = anotherProp;
+		}
+
+		private MyOtherObject yetAnotherProp = new MyOtherObject();
+
+		public MyOtherObject getYetAnotherProp() {
+			return yetAnotherProp;
+		}
+
+		public void setYetAnotherProp(MyOtherObject yetAnotherProp) {
+			this.yetAnotherProp = yetAnotherProp;
+		}
+
+	}
+
+	public static class MyOtherObject {
+		private String myOtherObjectProperty = "";
+
+		public String getMyOtherObjectProperty() {
+			return myOtherObjectProperty;
+		}
+
+		public void setMyOtherObjectProperty(String myOtherObjectProperty) {
+			this.myOtherObjectProperty = myOtherObjectProperty;
+		}
+
+	}
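+
+	// For illustration only (assumed serialized shape, not asserted by this test):
+	// an instance of SyncDiscoveryServiceAnnotation is expected to serialize roughly as
+	//   { "prop1": "...", "prop2": 4237,
+	//     "prop3": { "anotherProp": "...",
+	//                "yetAnotherProp": { "myOtherObjectProperty": "..." } } }
+	// so that properties nested two levels deep survive the annotation round trip.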
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		try {
+			MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
+
+			List<Annotation> annotations = new ArrayList<>();
+			SyncDiscoveryServiceAnnotation annotation1 = new SyncDiscoveryServiceAnnotation();
+			String annotation1_prop1 = "prop1_1_" + dataSetRef.getUrl();
+			annotation1.setProp1(annotation1_prop1);
+			annotation1.setProp2(annotation1_prop1.hashCode());
+			annotation1.setProfiledObject(dataSetRef);
+			MyObject mo1 = new MyObject();
+			MyOtherObject moo1 = new MyOtherObject();
+			moo1.setMyOtherObjectProperty("nestedtwolevels" + annotation1_prop1);
+			mo1.setYetAnotherProp(moo1);
+			mo1.setAnotherProp("nested" + annotation1_prop1);
+			annotation1.setProp3(mo1);
+			annotations.add(annotation1);
+
+			SyncDiscoveryServiceAnnotation annotation2 = new SyncDiscoveryServiceAnnotation();
+			String annotation2_prop1 = "prop1_2_" + dataSetRef.getUrl();
+			annotation2.setProp1(annotation2_prop1);
+			annotation2.setProp2(annotation2_prop1.hashCode());
+			annotation2.setProfiledObject(dataSetRef);
+			MyObject mo2 = new MyObject();
+			MyOtherObject moo2 = new MyOtherObject();
+			moo2.setMyOtherObjectProperty("nestedtwolevels" + annotation2_prop1);
+			mo2.setYetAnotherProp(moo2);
+			mo2.setAnotherProp("nested" + annotation2_prop1);
+			annotation2.setProp3(mo2);
+			annotations.add(annotation2);
+
+			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
+			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+			DiscoveryServiceResult dsResult = new DiscoveryServiceResult();
+			dsResult.setAnnotations(annotations);
+			resp.setResult(dsResult);
+			resp.setDetails(this.getClass().getName() + ".runAnalysis finished OK");
+
+			logger.info("Returning from discovery service " + this.getClass().getSimpleName() + " with result: " + JSONUtils.toJSON(resp));
+			return resp;
+		} catch (Exception exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java
new file mode 100755
index 0000000..91b544c
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/annotation/TestSyncDiscoveryServiceWritingJsonAnnotations.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.annotation;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResult;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class TestSyncDiscoveryServiceWritingJsonAnnotations extends DiscoveryServiceBase implements SyncDiscoveryService {
+	Logger logger = ODFTestLogger.get();
+	private String annotationResult = Utils.getInputStreamAsString(this.getClass().getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json"), "UTF-8");
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		try {
+			MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
+
+			List<Annotation> annotations = new ArrayList<>();
+			ProfilingAnnotation annotation1 = new ProfilingAnnotation();
+			annotation1.setProfiledObject(dataSetRef);
+			annotation1.setJsonProperties(annotationResult);
+			annotation1.setAnnotationType("JsonAnnotationWriteTest");
+			annotation1.setJavaClass("JsonAnnotationWriteTest");
+			annotations.add(annotation1);
+
+			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
+			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+			DiscoveryServiceResult dsResult = new DiscoveryServiceResult();
+			dsResult.setAnnotations(annotations);
+			resp.setResult(dsResult);
+			resp.setDetails(this.getClass().getName() + ".runAnalysis finished OK");
+
+			logger.info("Returning from discovery service " + this.getClass().getSimpleName() + " with result: " + JSONUtils.toJSON(resp));
+			return resp;
+		} catch (Exception exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java
new file mode 100755
index 0000000..b1d2518
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ODFConfigurationTest.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.configuration;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.configuration.ConfigManager;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+/**
+ * This test uses mocked storage, so no Zookeeper instance is required.
+ */
+public class ODFConfigurationTest extends ODFTestcase {
+
+	Logger logger = ODFTestLogger.get();
+
+	@Before
+	public void setupDefaultConfig() throws JsonParseException, JsonMappingException, IOException, ValidationException, JSONException {
+		logger.info("reset config to default");
+		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
+		ConfigContainer defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
+		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
+		configManager.updateConfigContainer(defaultConfig);
+	}
+
+	@Test
+	public void testUserDefinedMerge() throws JsonParseException, JsonMappingException, IOException {
+		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
+		ConfigContainer defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
+		// set test properties to default values; the merge below overwrites only testProp
+		defaultConfig.getOdf().getUserDefined().put("testProp", "defaultValue");
+		defaultConfig.getOdf().getUserDefined().put("testProp2", "defaultValue");
+		logger.info("Read config: " + defaultConfig);
+
+		// config example setting the user-defined property testProp to 123
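+		// expanded for readability, the escaped string below reads:
+		// { "odf": { "userDefined": { "testProp": 123 } } }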
+		String value = "{\r\n\t\"odf\" : {\r\n\t\"userDefined\" : {\r\n\t\t\"testProp\" : 123\r\n\t}\r\n}\r\n}\r\n";
+		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
+		Utils.mergeODFPOJOs(defaultConfig, props);
+		logger.info("Mergded config: " + defaultConfig);
+
+		Assert.assertEquals(123, defaultConfig.getOdf().getUserDefined().get("testProp"));
+		Assert.assertEquals("defaultValue", defaultConfig.getOdf().getUserDefined().get("testProp2"));
+	}
+
+	@Test
+	public void testValidation() throws JsonParseException, JsonMappingException, IOException {
+		boolean exceptionOccurred = false;
+		String value = "{\r\n\t\"odf\" : {\r\n\t\t\"discoveryServiceWatcherWaitMs\" : -5\r\n\t}\r\n}\r\n";
+		try {
+			ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
+			props.validate();
+		} catch (ValidationException e) {
+			exceptionOccurred = true;
+		}
+
+		Assert.assertTrue(exceptionOccurred);
+	}
+
+	@Test
+	public void testMerge() throws JsonParseException, JsonMappingException, IOException {
+		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
+		ConfigContainer defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
+		// config example setting the ODF property discoveryServiceWatcherWaitMs to 777
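+		// expanded for readability, the escaped string below reads:
+		// { "odf": { "discoveryServiceWatcherWaitMs": 777 } }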
+		String value = "{\r\n\t\"odf\" : {\r\n\t\t\"discoveryServiceWatcherWaitMs\" : 777\r\n\t}\r\n}\r\n";
+		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
+		Utils.mergeODFPOJOs(defaultConfig, props);
+
+		// TODOCONFIG, move next line to kafka tests
+		// Assert.assertEquals(777, defaultConfig.getOdf().getQueueConsumerWaitMs().intValue());
+	}
+
+	@Test
+	public void testDeepMerge() throws JsonParseException, JsonMappingException, IOException {
+		InputStream is = ODFConfigurationTest.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json");
+		ConfigContainer defaultConfig = new ObjectMapper().readValue(is, ConfigContainer.class);
+		// config example setting the Kafka consumer property offsetsStorage to "TEST"; all other kafkaConsumerConfig values must keep their defaults
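+		// expanded for readability, the escaped string below reads (class name inlined):
+		// { "odf": { "messagingConfiguration": { "type": "...KafkaMessagingConfiguration",
+		//     "kafkaConsumerConfig": { "offsetsStorage": "TEST" } } } }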
+		String value = "{\r\n\t\"odf\" : {\r\n\"messagingConfiguration\": { \"type\": \"" + KafkaMessagingConfiguration.class.getName()
+				+ "\", \t\t\"kafkaConsumerConfig\" : { \r\n\t\t\t\"offsetsStorage\" : \"TEST\"\r\n\t\t}\r\n\t}\r\n}}\r\n";
+		ConfigContainer props = new ObjectMapper().readValue(value, ConfigContainer.class);
+		Utils.mergeODFPOJOs(defaultConfig, props);
+
+		// TODOCONFIG
+		//		Assert.assertEquals("TEST", defaultConfig.getOdf().getKafkaConsumerConfig().getOffsetsStorage());
+		//make sure the rest is still default
+		//		Assert.assertEquals(400, defaultConfig.getOdf().getKafkaConsumerConfig().getZookeeperSessionTimeoutMs().intValue());
+	}
+
+	@Test
+	public void testGet() {
+		Assert.assertTrue(new ODFFactory().create().getSettingsManager().getODFSettings().isReuseRequests());
+	}
+
+	@Test
+	public void testPut() throws InterruptedException, IOException, ValidationException, JSONException, ServiceNotFoundException {
+		SettingsManager config = new ODFFactory().create().getSettingsManager();
+		String propertyId = "my_dummy_test_property";
+		int testNumber = 123;
+		Map<String, Object> cont = config.getUserDefinedConfig();
+		cont.put(propertyId, testNumber);
+		config.updateUserDefined(cont);
+		Assert.assertEquals(testNumber, config.getUserDefinedConfig().get(propertyId));
+
+		String testString = "test";
+		cont.put(propertyId, testString);
+		config.updateUserDefined(cont);
+
+		Assert.assertEquals(testString, config.getUserDefinedConfig().get(propertyId));
+
+		JSONObject testJson = new JSONObject();
+		testJson.put("testProp", "test");
+		cont.put(propertyId, testJson);
+		config.updateUserDefined(cont);
+
+		Assert.assertEquals(testJson, config.getUserDefinedConfig().get(propertyId));
+
+		ODFSettings settings = config.getODFSettings();
+		logger.info("Last update object: " + JSONUtils.toJSON(settings));
+		Assert.assertNotNull(settings);
+		Assert.assertNotNull(settings.getUserDefined());
+		Assert.assertNotNull(settings.getUserDefined().get(propertyId));
+		logger.info("User defined object: " + settings.getUserDefined().get(propertyId).getClass());
+		@SuppressWarnings("unchecked")
+		Map<String, Object> notifiedNestedJSON = (Map<String, Object>) settings.getUserDefined().get(propertyId);
+		Assert.assertNotNull(notifiedNestedJSON.get("testProp"));
+		Assert.assertTrue(notifiedNestedJSON.get("testProp") instanceof String);
+		Assert.assertEquals("test", notifiedNestedJSON.get("testProp"));
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java
new file mode 100755
index 0000000..aea9a30
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/PasswordEncryptionTest.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.configuration;
+
+import java.util.logging.Logger;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.api.settings.SparkConfig;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class PasswordEncryptionTest extends TimerTestBase {
+	Logger logger = ODFTestLogger.get();
+	private static final String SPARK_PASSWORD_CONFIG = "spark.authenticate.secret";
+
+	@Test
+	public void testGeneralPasswordEncryption() throws Exception {
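+		// This test exercises three views of the same password setting (semantics
+		// inferred from the assertions below): the caller stores a plain-text value,
+		// getODFSettingsHidePasswords() masks it as "***hidden***" for display, and
+		// getODFSettings() returns it encrypted so that Encryption.decryptText()
+		// recovers the original value.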
+		SettingsManager settings = new ODFFactory().create().getSettingsManager();
+		ODFSettings settingsWithPlainPasswords = settings.getODFSettingsHidePasswords();
+		settingsWithPlainPasswords.setOdfPassword("newOdfPassword");
+		logger.info("Settings with plain password: " + JSONUtils.toJSON(settingsWithPlainPasswords));
+		settings.updateODFSettings(settingsWithPlainPasswords);
+
+		ODFSettings settingsWithHiddenPasswords = settings.getODFSettingsHidePasswords();
+		String hiddenPasswordIdentifier = "***hidden***";
+		Assert.assertEquals(hiddenPasswordIdentifier, settingsWithHiddenPasswords.getOdfPassword());
+		logger.info("Settings with hidden password: " + JSONUtils.toJSON(settingsWithHiddenPasswords));
+
+		ODFSettings settingsWithEncryptedPassword = settings.getODFSettings();
+		Assert.assertEquals("newOdfPassword", Encryption.decryptText(settingsWithEncryptedPassword.getOdfPassword()));
+		logger.info("Settings with encrypted password: " + JSONUtils.toJSON(settingsWithEncryptedPassword));
+
+		// When overwriting settings with hidden passwords, encrypted passwords must be kept internally
+		settings.updateODFSettings(settingsWithHiddenPasswords);
+		settingsWithEncryptedPassword = settings.getODFSettings();
+		Assert.assertEquals("newOdfPassword", Encryption.decryptText(settingsWithEncryptedPassword.getOdfPassword()));
+	}
+
+	@Test
+	public void testSparkConfigEncryption() throws Exception {
+		SettingsManager settings = new ODFFactory().create().getSettingsManager();
+		SparkConfig plainSparkConfig = new SparkConfig();
+		plainSparkConfig.setConfig(SPARK_PASSWORD_CONFIG, "plainConfigValue");
+		ODFSettings settingsWithPlainPasswords = settings.getODFSettings();
+		settingsWithPlainPasswords.setSparkConfig(plainSparkConfig);
+		logger.info("Settings with plain password: " + JSONUtils.toJSON(settingsWithPlainPasswords));
+		settings.updateODFSettings(settingsWithPlainPasswords);
+
+		ODFSettings settingsWithHiddenPasswords = settings.getODFSettingsHidePasswords();
+		String hiddenPasswordIdentifier = "***hidden***";
+		String hiddenConfigValue = (String) settingsWithHiddenPasswords.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
+		Assert.assertEquals(hiddenPasswordIdentifier, hiddenConfigValue);
+		logger.info("Config with hidden password: " + JSONUtils.toJSON(settingsWithHiddenPasswords));
+
+		ODFSettings settingsWithEncryptedPassword = settings.getODFSettings();
+		String encryptedConfigValue = (String) settingsWithEncryptedPassword.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
+		Assert.assertEquals("plainConfigValue", Encryption.decryptText(encryptedConfigValue));
+		logger.info("Config with encrypted password: " + JSONUtils.toJSON(settingsWithEncryptedPassword));
+
+		// When overwriting settings with hidden passwords, encrypted passwords must be kept internally
+		settings.updateODFSettings(settingsWithHiddenPasswords);
+		settingsWithEncryptedPassword = settings.getODFSettings();
+		encryptedConfigValue = (String) settingsWithEncryptedPassword.getSparkConfig().getConfigs().get(SPARK_PASSWORD_CONFIG);
+		Assert.assertEquals("plainConfigValue", Encryption.decryptText(encryptedConfigValue));
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java
new file mode 100755
index 0000000..e844e05
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/configuration/ValidationTests.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.configuration;
+
+import java.util.Collections;
+
+import org.apache.atlas.odf.api.settings.validation.EnumValidator;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.configuration.ConfigManager;
+import org.apache.atlas.odf.core.configuration.ServiceValidator;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.settings.validation.ImplementationValidator;
+import org.apache.atlas.odf.api.settings.validation.NumberPositiveValidator;
+import org.apache.atlas.odf.api.settings.validation.PropertyValidator;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+import org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryServiceWritingAnnotations1;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class ValidationTests extends TimerTestBase {
+
+	@Test
+	public void testEnum() {
+		String[] vals = new String[] { "test", "test2" };
+		String correct = "test";
+		String incorrect = "fail";
+
+		Assert.assertTrue(validateTest(correct, new EnumValidator(vals)));
+		Assert.assertFalse(validateTest(incorrect, new EnumValidator(vals)));
+	}
+
+	@Test
+	public void testImplementation() {
+		String correct = TestAsyncDiscoveryServiceWritingAnnotations1.class.getName();
+		String incorrect = "dummyClass";
+		Assert.assertTrue(validateTest(correct, new ImplementationValidator()));
+		Assert.assertFalse(validateTest(incorrect, new ImplementationValidator()));
+	}
+
+	@Test
+	public void testService() throws Exception {
+		String s = "{\r\n" + 
+				"			\"id\": \"asynctestservice\",\r\n" + 
+				"			\"name\": \"Async test\",\r\n" + 
+				"			\"description\": \"The async test service\",\r\n" + 
+				"			\"endpoint\": {\r\n" + 
+				"				\"runtimeName\": \"Java\",\r\n" + 
+				"				\"className\": \"org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryService1\"\r\n" +
+				"			}\r\n" + 
+				"		}";
+		
+		DiscoveryServiceProperties newService = JSONUtils.fromJSON(s, DiscoveryServiceProperties.class);
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		//ODFConfig odfConfig = new ODFFactory().create(ODFConfiguration.class).getODFConfig();
+
+		ConfigContainer new1 = new ConfigContainer();
+		new1.setRegisteredServices(Collections.singletonList(newService));
+		ConfigManager configManager = new ODFInternalFactory().create(ConfigManager.class);
+		configManager.updateConfigContainer(new1);
+		
+		DiscoveryServiceProperties correct = discoveryServicesManager.getDiscoveryServicesProperties().get(0);
+		Assert.assertEquals("asynctestservice", correct.getId());
+		correct.setId("newId");
+		DiscoveryServiceProperties incorrect = new DiscoveryServiceProperties();
+		Assert.assertTrue(validateTest(correct, new ServiceValidator()));
+		Assert.assertFalse(validateTest(incorrect, new ServiceValidator()));
+	}
+
+	@Test
+	public void testNumber() {
+		int correct = 5;
+		int incorrect = -5;
+		Assert.assertTrue(validateTest(correct, new NumberPositiveValidator()));
+		Assert.assertFalse(validateTest(incorrect, new NumberPositiveValidator()));
+	}
+
+	private boolean validateTest(Object value, PropertyValidator validator) {
+		try {
+			validator.validate(null, value);
+			return true;
+		} catch (ValidationException ex) {
+			return false;
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java
new file mode 100755
index 0000000..4fa2eda
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisProcessingTests.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.settings.MessagingConfiguration;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class AnalysisProcessingTests extends ODFTestcase {
+	Logger logger = ODFTestLogger.get();
+
+	@Test
+	public void testAnalysisProcessingAfterShutdown() throws Exception {
+		final SettingsManager config = new ODFFactory().create().getSettingsManager();
+		final ODFSettings odfSettings = config.getODFSettings();
+		final MessagingConfiguration messagingConfiguration = odfSettings.getMessagingConfiguration();
+		final Long origRequestRetentionMs = messagingConfiguration.getAnalysisRequestRetentionMs();
+		messagingConfiguration.setAnalysisRequestRetentionMs(300000L);
+		config.updateODFSettings(odfSettings);
+
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
+		AnalysisRequest req = tracker.getRequest();
+		req.setDiscoveryServiceSequence(Arrays.asList("asynctestservice"));
+		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
+		final AnalysisResponse startRequest = cc.startRequest(req);
+		logger.info("Analysis :" + startRequest.getId());
+
+		Assert.assertNull(startRequest.getOriginalRequest());
+		Assert.assertFalse(startRequest.isInvalidRequest());
+		final AnalysisResponse duplicate = cc.startRequest(req);
+		Assert.assertNotNull(duplicate.getOriginalRequest());
+		Assert.assertEquals(startRequest.getId(), duplicate.getId());
+		logger.info("Analysis1 duplciate :" + duplicate.getId());
+
+		final AnalysisCancelResult cancelRequest = cc.cancelRequest(startRequest.getId());
+		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
+
+		cc.getQueueManager().stop();
+
+		AnalysisResponse response2 = cc.startRequest(req);
+		logger.info("Analysis2:" + response2.getId());
+		AnalysisRequestStatus requestStatus = cc.getRequestStatus(response2.getId());
+		int maxWait = 20;
+
+		int currentWait = 0;
+		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.ACTIVE) {
+			Thread.sleep(100);
+			currentWait++;
+			requestStatus = cc.getRequestStatus(response2.getId());
+		}
+		logger.info("THREAD ACTIVE, KILL IT!");
+
+		cc.getQueueManager().start();
+		logger.info("restarted");
+		Assert.assertNull(response2.getOriginalRequest());
+		Assert.assertFalse(response2.isInvalidRequest());
+
+		messagingConfiguration.setAnalysisRequestRetentionMs(origRequestRetentionMs);
+		config.updateODFSettings(odfSettings);
+
+		currentWait = 0;
+		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.FINISHED) {
+			Thread.sleep(100);
+			currentWait++;
+			requestStatus = cc.getRequestStatus(response2.getId());
+		}
+		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, requestStatus.getState());
+	}
+
+	@Test
+	public void testRequestWithAnnotationTypes() throws Exception {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
+		AnalysisRequest req = tracker.getRequest();
+		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
+		List<String> annotationTypes = Arrays.asList(new String[] { "AsyncTestDummyAnnotation" });
+		req.setAnnotationTypes(annotationTypes);
+		logger.info(MessageFormat.format("Running discovery request for annotation type {0}.", annotationTypes));
+		AnalysisResponse resp = cc.startRequest(req);
+		logger.info(MessageFormat.format("Started request id {0}.", resp.getId()));
+		Assert.assertNotNull(resp.getId());
+		Assert.assertFalse(resp.isInvalidRequest());
+
+		int currentWait = 0;
+		int maxWait = 20;
+		AnalysisRequestStatus requestStatus = cc.getRequestStatus(resp.getId());
+		while (currentWait < maxWait && requestStatus.getState() != AnalysisRequestStatus.State.FINISHED) {
+			Thread.sleep(100);
+			currentWait++;
+			requestStatus = cc.getRequestStatus(resp.getId());
+		}
+		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, requestStatus.getState());
+		Assert.assertEquals("Generated service has incorrect number of elements.", 1, requestStatus.getRequest().getDiscoveryServiceSequence().size());
+		Assert.assertEquals("Generated service sequence differs from expected value.", "asynctestservice", requestStatus.getRequest().getDiscoveryServiceSequence().get(0));
+	}
+
+	@Test
+	public void testRequestWithMissingAnnotationTypes() throws Exception {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
+		AnalysisRequest req = tracker.getRequest();
+		req.getDataSets().get(0).setId(ODFAPITest.DUMMY_SUCCESS_ID + "_dataset");
+		List<String> annotationTypes = Arrays.asList(new String[] { "noServiceExistsForThisAnnotationType" });
+		req.setAnnotationTypes(annotationTypes);
+		logger.info(MessageFormat.format("Running discovery request for non-existing annotation type {0}.", annotationTypes));
+		AnalysisResponse resp = cc.startRequest(req);
+		Assert.assertTrue(resp.isInvalidRequest());
+		Assert.assertEquals("Unexpected error message.", "No suitable discovery services found to create the requested annotation types.", resp.getDetails());
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java
new file mode 100755
index 0000000..fd39e15
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestCancellationTest.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import java.util.Collections;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+
+public class AnalysisRequestCancellationTest extends ODFTestcase {
+
+	Logger logger = ODFTestLogger.get();
+
+	AnalysisRequestTracker generateTracker(String id, STATUS status) {
+		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
+		Utils.setCurrentTimeAsLastModified(tracker);
+		tracker.setNextDiscoveryServiceRequest(0);
+		AnalysisRequest req = new AnalysisRequest();
+		req.setId(id);
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId("DataSet" + id);
+		req.setDataSets(Collections.singletonList(ref));
+		tracker.setRequest(req);
+		tracker.setStatus(status);
+		return tracker;
+	}
+
+	@Test
+	public void testRequestCancellationNotFoundFailure() {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisCancelResult cancelRequest = cc.cancelRequest("dummy_id");
+		Assert.assertEquals(AnalysisCancelResult.State.NOT_FOUND, cancelRequest.getState());
+	}
+
+	@Test
+	public void testRequestCancellationWrongStateFailure() {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
+		String testId = "test_id1";
+		AnalysisRequestTracker tracker = null;
+		AnalysisCancelResult cancelRequest = null;
+		
+		tracker = generateTracker(testId, STATUS.FINISHED);
+		store.store(tracker);
+		cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
+
+		tracker = generateTracker(testId, STATUS.ERROR);
+		store.store(tracker);
+		cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
+
+		tracker = generateTracker(testId, STATUS.CANCELLED);
+		store.store(tracker);
+		cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.INVALID_STATE, cancelRequest.getState());
+	}
+
+	@Test
+	public void testRequestCancellationSuccess() {
+		ControlCenter cc = new ODFInternalFactory().create(ControlCenter.class);
+		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
+		String testId = "test_id2";
+
+		AnalysisRequestTracker tracker = generateTracker(testId, STATUS.INITIALIZED);
+		store.store(tracker);
+		AnalysisCancelResult cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
+
+		tracker = generateTracker(testId, STATUS.IN_DISCOVERY_SERVICE_QUEUE);
+		store.store(tracker);
+		cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
+
+		tracker = generateTracker(testId, STATUS.DISCOVERY_SERVICE_RUNNING);
+		store.store(tracker);
+		cancelRequest = cc.cancelRequest(testId);
+		Assert.assertEquals(AnalysisCancelResult.State.SUCCESS, cancelRequest.getState());
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java
new file mode 100755
index 0000000..7eb46d8
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/AnalysisRequestTrackerStoreTest.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+
+public class AnalysisRequestTrackerStoreTest extends ODFTestcase {
+
+	Logger logger = ODFTestLogger.get();
+
+	AnalysisRequestTracker generateTracker(String id, STATUS status) {
+		AnalysisRequestTracker tracker = new AnalysisRequestTracker();
+		Utils.setCurrentTimeAsLastModified(tracker);
+		tracker.setNextDiscoveryServiceRequest(0);
+		AnalysisRequest req = new AnalysisRequest();
+		req.setId(id);
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId("DataSet" + id);
+		req.setDataSets(Collections.singletonList(ref));
+		tracker.setRequest(req);
+		tracker.setStatus(status);
+		return tracker;
+	}
+
+	@Test
+	public void testStore() throws Exception {
+		AnalysisRequestTrackerStore store = (new ODFInternalFactory()).create(AnalysisRequestTrackerStore.class);
+		assertNotNull(store);
+		int MAX_TRACKERS = 50;
+		List<AnalysisRequestTracker> trackers1 = new ArrayList<AnalysisRequestTracker>();
+		STATUS lastStatus = STATUS.IN_DISCOVERY_SERVICE_QUEUE;
+		for (int i = 0; i < MAX_TRACKERS; i++) {
+			trackers1.add(generateTracker("STORETEST_ID" + i, lastStatus));
+		}
+
+		logger.info("Storing " + MAX_TRACKERS + " Trackers");
+		long pass1Start = System.currentTimeMillis();
+		for (AnalysisRequestTracker tracker : trackers1) {
+			store.store(tracker);
+		}
+		long pass1End = System.currentTimeMillis();
+
+		logger.info("Storing " + MAX_TRACKERS + " Trackers again with new status");
+
+		lastStatus = STATUS.FINISHED;
+		List<AnalysisRequestTracker> trackers2 = new ArrayList<AnalysisRequestTracker>();
+		for (int i = 0; i < MAX_TRACKERS; i++) {
+			trackers2.add(generateTracker("STORETEST_ID" + i, lastStatus));
+		}
+		long pass2Start = System.currentTimeMillis();
+		for (AnalysisRequestTracker tracker : trackers2) {
+			store.store(tracker);
+		}
+		long pass2End = System.currentTimeMillis();
+
+		Thread.sleep(2000);
+		logger.info("Querying and checking " + MAX_TRACKERS + " Trackers");
+
+		long queryStart = System.currentTimeMillis();
+
+		for (int i = 0; i < MAX_TRACKERS; i++) {
+			final String analysisRequestId = "STORETEST_ID" + i;
+			AnalysisRequestTracker tracker = store.query(analysisRequestId);
+			assertNotNull(tracker);
+			assertEquals(1, tracker.getRequest().getDataSets().size());
+			MetaDataObjectReference ref = new MetaDataObjectReference();
+			ref.setId("DataSet" + analysisRequestId);
+			assertEquals(tracker.getRequest().getDataSets().get(0), ref);
+			assertEquals(lastStatus, tracker.getStatus());
+		}
+		long queryEnd = System.currentTimeMillis();
+
+		System.out.println("First pass: " + (pass1End - pass1Start) + "ms, second pass: " + (pass2End - pass2Start) + "ms, query: " + (queryEnd - queryStart) + "ms");
+
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java
new file mode 100755
index 0000000..e3b3549
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DeclarativeRequestMapperTest.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.controlcenter.DeclarativeRequestMapper;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.controlcenter.ControlCenter;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+
+public class DeclarativeRequestMapperTest extends ODFTestBase {
+	final private static String SERVICE_CLASSNAME = "org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryService1";
+	final private static String[] EXPECTED_SERVICE_SEQUENCES = new String[] { "pre3,ser1", "alt1,ser1", "pre4,pre1,ser1", 
+			"pre3,ser1,ser3", "pre3,ser1,ser5", "alt1,ser1,ser3", "alt1,ser1,ser5", "pre3,pre2,ser4", "alt1,pre2,ser4", 
+			"pre4,pre1,ser1,ser3", "pre4,pre1,ser1,ser5", "pre3,ser1,alt1,ser3", "pre3,ser1,pre2,ser4", "pre3,ser1,alt1,ser5" };
+	private Logger logger = Logger.getLogger(DeclarativeRequestMapperTest.class.getName());
+
+	private static void createDiscoveryService(String serviceId, String[] resultingAnnotationTypes, String[] prerequisiteAnnotationTypes, String[] supportedObjectTypes) throws ValidationException, JSONException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
+		DiscoveryServiceJavaEndpoint dse = new DiscoveryServiceJavaEndpoint();
+		dse.setClassName(SERVICE_CLASSNAME);
+		dsProperties.setEndpoint(JSONUtils.convert(dse, DiscoveryServiceEndpoint.class));
+		dsProperties.setId(serviceId);
+		dsProperties.setName(serviceId + " Discovery Service");
+		dsProperties.setPrerequisiteAnnotationTypes(Arrays.asList(prerequisiteAnnotationTypes));
+		dsProperties.setResultingAnnotationTypes(Arrays.asList(resultingAnnotationTypes));
+		dsProperties.setSupportedObjectTypes(Arrays.asList(supportedObjectTypes));
+		discoveryServicesManager.createDiscoveryService(dsProperties);
+	}
+
+	private void deleteDiscoveryService(String serviceId, boolean failOnError) throws ValidationException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		try {
+			discoveryServicesManager.deleteDiscoveryService(serviceId);
+		}
+		catch (ServiceNotFoundException e) {
+			if (failOnError) {
+				Assert.fail("Error deleting discovery services.");
+			}
+		}		
+	}
+
+	private void deleteDiscoveryServices(boolean failOnError) throws ValidationException {
+		List<String> serviceIds = Arrays.asList(new String[] { "ser1", "ser2", "ser3", "ser4", "ser5", "pre1", "pre2", "pre3", "pre4", "alt1" });
+		for (String serviceId : serviceIds) {
+			deleteDiscoveryService(serviceId, failOnError);
+		}
+	}
+
+	private void createDiscoveryServices() throws ValidationException, JSONException {
+		createDiscoveryService("ser1", new String[] { "an1", "com1", "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("ser2", new String[] { "an2", "com1"         }, new String[] { "pre2"         }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("ser3", new String[] {                "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("ser4", new String[] { "an1", "com1", "com2" }, new String[] { "pre1", "pre2" }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("ser5", new String[] {        "com1", "com2" }, new String[] { "pre1"         }, new String[] { "Table", "DataFile" });
+
+		createDiscoveryService("pre1", new String[] { "pre1"                }, new String[] { "pre4"         }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("pre2", new String[] { "pre2"                }, new String[] {                }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("pre3", new String[] { "pre1"                }, new String[] {                }, new String[] { "Table", "DataFile" });
+		createDiscoveryService("pre4", new String[] { "pre4"                }, new String[] {                }, new String[] { "Table", "DataFile" });
+
+		createDiscoveryService("alt1", new String[] { "pre1"                }, new String[] {                }, new String[] { "Table", "DataFile" });
+	}
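+
+	// Dependency summary derived from the registrations above: pre2, pre3, pre4 and
+	// alt1 have no prerequisites; pre1 requires annotation type "pre4"; ser1, ser3
+	// and ser5 require "pre1"; ser2 requires "pre2"; ser4 requires "pre1" and "pre2".
+	// Annotation type "pre1" can be produced by pre1, pre3 or alt1, which is why the
+	// mapper yields alternative sequences such as "pre3,ser1" and "alt1,ser1".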
+
+	@Test
+	public void testDiscoveryServiceSequences() throws Exception {
+		deleteDiscoveryServices(false);
+		createDiscoveryServices();
+
+		AnalysisRequest request = new AnalysisRequest();
+		request.setAnnotationTypes(Arrays.asList( new String[] { "an1", "com2" }));
+		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+		logger.log(Level.INFO, "Printing list of mapper result to stdout.");
+		int i = 0;
+		for (DeclarativeRequestMapper.DiscoveryServiceSequence discoveryApproach : mapper.getDiscoveryServiceSequences()) {
+			String sequence = Utils.joinStrings(new ArrayList<String>(discoveryApproach.getServiceSequence()), ',');
+			System.out.println(sequence);
+			if (i < EXPECTED_SERVICE_SEQUENCES.length) {
+				Assert.assertEquals(EXPECTED_SERVICE_SEQUENCES[i++], sequence);
+			}
+		}
+		Assert.assertEquals("Number of calculated discovery service sequences does not match expected value.", 36, mapper.getDiscoveryServiceSequences().size());
+
+		deleteDiscoveryServices(true);
+	}
+
+	@Test
+	public void testRecommendedDiscoveryServiceSequence() throws Exception {
+		deleteDiscoveryServices(false);
+		createDiscoveryServices();
+
+		AnalysisRequest request = new AnalysisRequest();
+		request.setAnnotationTypes(Arrays.asList( new String[] { "com2", "pre4" }));
+		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+		Assert.assertEquals("Recommended sequence does not match expected string.", "pre4,pre1,ser1", Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
+
+		deleteDiscoveryServices(true);
+	}
+
+	@Test
+	public void testRemoveFailingService() throws Exception {
+		deleteDiscoveryServices(false);
+		createDiscoveryServices();
+
+		AnalysisRequest request = new AnalysisRequest();
+		request.setAnnotationTypes(Arrays.asList(new String[] { "an1", "com2" }));
+		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+		Assert.assertEquals("Original sequence does not match expected string.", EXPECTED_SERVICE_SEQUENCES[0], Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
+
+		mapper.removeDiscoveryServiceSequences("ser1");
+		Assert.assertEquals("Updated sequence does not match expected string.", "pre3,pre2,ser4", Utils.joinStrings(mapper.getRecommendedDiscoveryServiceSequence(), ','));
+
+		deleteDiscoveryServices(true);
+	}
+
+	@Test
+	public void testRequestWithManyAnnotationTypes() throws Exception {
+		deleteDiscoveryServices(false);
+		createDiscoveryServices();
+
+		AnalysisRequest request = new AnalysisRequest();
+		request.setAnnotationTypes(Arrays.asList(new String[] {  "an1", "an2", "com1", "com2", "pre1", "pre2", "pre4" }));
+		DeclarativeRequestMapper mapper = new DeclarativeRequestMapper(request);
+		Assert.assertEquals("Number of calculated discovery service sequences does not match expected value.", 75, mapper.getDiscoveryServiceSequences().size());
+
+		deleteDiscoveryServices(true);
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java
new file mode 100755
index 0000000..96a4fee
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/DefaultThreadManagerTest.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
+import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+
+public class DefaultThreadManagerTest extends TimerTestBase {
+
+	int threadMS = 100;
+	int waitMS = 5000;
+	
+	Logger logger = ODFTestLogger.get();
+
+	class TestRunnable implements ODFRunnable {
+
+		String id;
+		boolean cancelled = false;
+		long msToWaitBeforeFinish;
+		
+		public TestRunnable(String id, long msToWaitBeforeFinish) {
+			this.id = id;
+			this.msToWaitBeforeFinish = msToWaitBeforeFinish;
+		}
+		
+		public TestRunnable(String id) {
+			this(id, threadMS);
+		}
+
+		@Override
+		public void run() {
+			logger.info("Starting thread with ID: " + id);
+			try {
+				Thread.sleep(msToWaitBeforeFinish);
+			} catch (InterruptedException e) {
+				// an interrupted test runnable just logs the interrupt and finishes early
+				e.printStackTrace();
+			}
+			logger.info("Thread finished with ID: " + id);
+
+		}
+
+		@Override
+		public void setExecutorService(ExecutorService service) {
+			// not needed: this test runnable never uses the executor service
+
+		}
+
+		@Override
+		public void cancel() {
+			cancelled = true;
+		}
+
+		@Override
+		public boolean isReady() {
+			return true;
+		}
+
+	}
+
+	@Test
+	public void testSimple() throws Exception {
+		ODFInternalFactory f = new ODFInternalFactory();
+		ThreadManager tm = f.create(ThreadManager.class);
+		tm.setExecutorService(f.create(ExecutorServiceFactory.class).createExecutorService());
+		assertNotNull(tm);
+
+		String id1 = "id1";
+		String id2 = "id2";
+
+		// start id1
+		ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id1);
+		Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
+
+		boolean b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
+		assertTrue(b);
+		b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
+		assertFalse(b);
+
+		st = tm.getStateOfUnmanagedThread(id1);
+		Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, st);
+
+		// start id2
+		st = tm.getStateOfUnmanagedThread(id2);
+		Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
+
+		b = tm.startUnmanagedThread(id2, new TestRunnable(id2)).isNewThreadCreated();
+		assertTrue(b);
+		b = tm.startUnmanagedThread(id2, new TestRunnable(id2)).isNewThreadCreated();
+		assertFalse(b);
+
+		Thread.sleep(waitMS);
+		st = tm.getStateOfUnmanagedThread(id1);
+		Assert.assertEquals(ThreadStatus.ThreadState.FINISHED, st);
+		b = tm.startUnmanagedThread(id1, new TestRunnable(id1)).isNewThreadCreated();
+		assertTrue(b);
+
+		st = tm.getStateOfUnmanagedThread(id2);
+		// id2 should be removed from thread list
+		Assert.assertTrue(ThreadStatus.ThreadState.FINISHED.equals(st) || ThreadStatus.ThreadState.NON_EXISTENT.equals(st));
+
+		tm.shutdownThreads(Arrays.asList("id1", "id2"));
+	}
+
+	@Test
+	public void testManyThreads() throws Exception {
+		ODFInternalFactory f = new ODFInternalFactory();
+		ThreadManager tm = f.create(ThreadManager.class);
+		tm.setExecutorService(f.create(ExecutorServiceFactory.class).createExecutorService());
+
+		assertNotNull(tm);
+
+		List<String> threadIds = new ArrayList<>();
+		int THREAD_NUM = 20;
+		for (int i = 0; i < THREAD_NUM; i++) {
+			String id = "ThreadID" + i;
+			threadIds.add(id);
+			ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id);
+			Assert.assertEquals(ThreadStatus.ThreadState.NON_EXISTENT, st);
+
+			boolean b = tm.startUnmanagedThread(id, new TestRunnable(id)).isNewThreadCreated();
+			assertTrue(b);
+			b = tm.startUnmanagedThread(id, new TestRunnable(id)).isNewThreadCreated();
+			assertFalse(b);
+
+			st = tm.getStateOfUnmanagedThread(id);
+			Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, st);
+
+		}
+		logger.info("All threads scheduled");
+
+		Thread.sleep(waitMS);
+
+		for (int i = 0; i < THREAD_NUM; i++) {
+			String id = "ThreadID" + i;
+			ThreadStatus.ThreadState st = tm.getStateOfUnmanagedThread(id);
+			Assert.assertEquals(ThreadStatus.ThreadState.FINISHED, st);
+		}
+		tm.shutdownThreads(threadIds);
+
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java
new file mode 100755
index 0000000..900c214
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ODFAPITest.java
@@ -0,0 +1,373 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.UnknownDataSet;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
+import org.apache.atlas.odf.core.metadata.DefaultMetadataStore;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+
+public class ODFAPITest extends ODFTestBase {
+
+	public static int WAIT_MS_BETWEEN_POLLING = 500;
+	public static int MAX_NUMBER_OF_POLLS = 500;
+	public static String DUMMY_SUCCESS_ID = "success";
+	public static String DUMMY_ERROR_ID = "error";
+
+	public static void runRequestAndCheckResult(String dataSetID, AnalysisRequestStatus.State expectedState, int expectedProcessedDiscoveryRequests) throws Exception{
+		runRequestAndCheckResult(Collections.singletonList(dataSetID), expectedState, expectedProcessedDiscoveryRequests);
+	}
+	
+	public static void runRequestAndCheckResult(List<String> dataSetIDs, AnalysisRequestStatus.State expectedState, int expectedProcessedDiscoveryRequests) throws Exception{
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		String id = runRequest(dataSetIDs, analysisManager);
+		log.info("Running request "+id+" on data sets: " + dataSetIDs);
+		AnalysisRequestStatus status = null;
+
+		int maxPolls = MAX_NUMBER_OF_POLLS;
+		do {
+			status = analysisManager.getAnalysisRequestStatus(id);
+			log.log(Level.INFO, "Poll no. {4} for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
+					expectedState, (MAX_NUMBER_OF_POLLS-maxPolls) });
+			maxPolls--;
+			Thread.sleep(WAIT_MS_BETWEEN_POLLING);
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+
+		log.log(Level.INFO, "Polling result after {0} polls for request id {1}: status: {2}", new Object[] {(MAX_NUMBER_OF_POLLS-maxPolls), id, status.getState()});
+		
+		Assert.assertTrue(maxPolls > 0);		
+		Assert.assertEquals(expectedState, status.getState());
+		AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
+		AnalysisRequestTracker tracker = store.query(id);
+		Assert.assertNotNull(tracker);
+		checkTracker(tracker, expectedProcessedDiscoveryRequests);
+		log.info("Status details: " + status.getDetails());
+	}
+
+	static void checkTracker(AnalysisRequestTracker tracker, int expectedProcessedDiscoveryRequests) {
+		if (expectedProcessedDiscoveryRequests == -1) {
+			expectedProcessedDiscoveryRequests = tracker.getDiscoveryServiceRequests().size(); 
+		}
+		Assert.assertEquals(expectedProcessedDiscoveryRequests, tracker.getDiscoveryServiceResponses().size());
+		
+	}
+
+	static String runRequest(String dataSetID, AnalysisManager analysisManager) throws Exception {
+		return runRequest(Collections.singletonList(dataSetID), analysisManager);
+	}
+
+	public static String runRequest(List<String> dataSetIDs, AnalysisManager analysisManager) throws Exception {
+		AnalysisRequest request = createAnalysisRequest(dataSetIDs);
+		log.info("Starting analysis");
+		AnalysisResponse response = analysisManager.runAnalysis(request);
+		Assert.assertNotNull(response);
+		Assert.assertFalse(response.isInvalidRequest());
+		String id = response.getId();
+		Assert.assertNotNull(id);
+		return id;
+	}
+
+	
+	@Test
+	public void testSimpleSuccess() throws Exception {
+		runRequestAndCheckResult("successID", AnalysisRequestStatus.State.FINISHED, -1);
+	}
+
+	public static void waitForRequest(String requestId, AnalysisManager analysisManager) {
+		waitForRequest(requestId, analysisManager, MAX_NUMBER_OF_POLLS);
+	}
+	
+	public static void waitForRequest(String requestId, AnalysisManager analysisManager, int maxPolls) {
+		AnalysisRequestStatus status = null;
+
+		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
+		do {
+			status = analysisManager.getAnalysisRequestStatus(requestId);
+			
+			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
+			maxPolls--;
+			try {
+				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
+			} catch (InterruptedException e) {
+				e.printStackTrace();
+				throw new RuntimeException(e);
+			}
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+		if (maxPolls == 0) {
+			log.log(Level.INFO, "Request ''{0}'' did not finish within the polling limit, giving up", requestId);
+		}
+		log.log(Level.INFO, "Polling for request ''{0}'' ended with state: ''{1}''", new Object[] { requestId, status.getState() });
+	}
+
+	public static boolean waitForRequest(String requestId, AnalysisManager analysisManager, int maxPolls, AnalysisRequestStatus.State expectedState) {
+		AnalysisRequestStatus status = null;
+
+		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
+		do {
+			status = analysisManager.getAnalysisRequestStatus(requestId);
+			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
+			maxPolls--;
+			try {
+				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
+			} catch (InterruptedException e) {
+				e.printStackTrace();
+				throw new RuntimeException(e);
+			}
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+		if (maxPolls == 0) {
+			log.log(Level.INFO, "Request ''{0}'' did not finish within the polling limit, giving up", requestId);
+		}
+		return expectedState.equals(status.getState());
+	}
+
+	
+	@Test
+	public void testSimpleSuccessDuplicate() throws Exception {
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		String id = runRequest("successID", analysisManager);
+		String secondId = runRequest("successID", analysisManager);
+		Assert.assertNotEquals(id, secondId);
+		// Wait for the duplicate-suppression timespan to expire, then check that a new analysis is started
+		Thread.sleep(DefaultStatusQueueStore.IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS*2 + 5000);
+		String thirdId = runRequest("successID", analysisManager);
+		Assert.assertNotEquals(secondId, thirdId);
+		waitForRequest(id, analysisManager);
+		waitForRequest(thirdId, analysisManager);
+	}
+
+	@Test
+	public void testSimpleSuccessNoDuplicate() throws Exception {
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		String id = runRequest("successID", analysisManager);
+		String secondId = runRequest("successID2", analysisManager);
+		Assert.assertNotEquals(id, secondId);
+		waitForRequest(id, analysisManager);
+		waitForRequest(secondId, analysisManager);
+	}
+
+	@Test
+	public void testSimpleSuccessDuplicateSubset() throws Exception {
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		String id = runRequest(Arrays.asList("successID", "successID2", "successID3"), analysisManager);
+		String secondId = runRequest("successID2", analysisManager);
+		Assert.assertNotEquals(id, secondId);
+		Thread.sleep(DefaultStatusQueueStore.IGNORE_SIMILAR_REQUESTS_TIMESPAN_MS + 5000);
+		String thirdId = runRequest("successID", analysisManager);
+		Assert.assertNotEquals(secondId, thirdId);
+		waitForRequest(id, analysisManager);
+		waitForRequest(thirdId, analysisManager);
+	}
+	
+	/**
+	 * This test depends on the speed of execution.
+	 * An analysis that is not in state INITIALIZED or IN_SERVICE_QUEUE cannot be cancelled. 
+	 * Therefore, if the analysis is started too quickly, this test will fail!
+	 *
+	 * Ignored for now as this can go wrong in the build.
+	 */
+	@Test
+	@Ignore
+	public void testCancelRequest() throws Exception {
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		String id = runRequest(Arrays.asList("successID", "successID2", "successID3"), analysisManager);
+		AnalysisCancelResult cancelAnalysisRequest = analysisManager.cancelAnalysisRequest(id);
+		Assert.assertEquals(cancelAnalysisRequest.getState(), AnalysisCancelResult.State.SUCCESS);
+		String secondId = runRequest("successID2", analysisManager);
+		Assert.assertNotEquals(id, secondId);
+	}
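+
+	/**
+	 * A minimal sketch added for illustration; the helper name is made up and it is
+	 * not called by the tests above. Since cancelling only succeeds while the request
+	 * is still queued, a more robust variant of the test above could treat a
+	 * non-SUCCESS result as "processing already started" rather than failing.
+	 * Only types and methods already used in this file are called here.
+	 */
+	private static boolean tryCancelBeforeProcessingStarts(AnalysisManager analysisManager, String requestId) {
+		AnalysisCancelResult result = analysisManager.cancelAnalysisRequest(requestId);
+		// SUCCESS is only returned while the request has not yet been picked up by a service
+		return AnalysisCancelResult.State.SUCCESS.equals(result.getState());
+	}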
+
+	
+	@Test
+	public void testRequestsWithDataSetListSuccess() throws Exception {
+		runRequestAndCheckResult(Arrays.asList("success1", "success2", "success3"), AnalysisRequestStatus.State.FINISHED, 6);
+	}
+	
+	@Test
+	public void testRequestsWithDataSetListError() throws Exception {
+		runRequestAndCheckResult(Arrays.asList("success1", "error2", "success3"), AnalysisRequestStatus.State.ERROR, 3);
+	}
+
+	@Test
+	public void testSimpleFailure() throws Exception {
+		runRequestAndCheckResult("errorID", AnalysisRequestStatus.State.ERROR, 1);
+	}
+	
+	@Test 
+	public void testManyRequests()  throws Exception {
+		List<String> dataSets = new ArrayList<String>();
+		List<AnalysisRequestStatus.State> expectedStates = new ArrayList<AnalysisRequestStatus.State>();
+		int dataSetNum = 5;
+		for (int i=0; i<dataSetNum; i++) {
+			AnalysisRequestStatus.State expectedState = AnalysisRequestStatus.State.FINISHED;
+			String dataSet = "successdataSet" + i;
+			if (i % 3 == 0) {
+				// every third data set should fail
+				dataSet = "errorDataSet" + i;
+				expectedState = AnalysisRequestStatus.State.ERROR;
+			} 
+			dataSets.add(dataSet);
+			expectedStates.add(expectedState);
+		}
+		
+		runRequests(dataSets, expectedStates);
+	}
+
+	public void runRequests(List<String> dataSetIDs, List<AnalysisRequestStatus.State> expectedStates) throws Exception {
+		Assert.assertTrue(dataSetIDs.size() == expectedStates.size());
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+
+		Map<AnalysisRequest, AnalysisRequestStatus.State> request2ExpectedState = new HashMap<AnalysisRequest, AnalysisRequestStatus.State>();
+
+		for (int i = 0; i < dataSetIDs.size(); i++) {
+			String dataSetID = dataSetIDs.get(i);
+			AnalysisRequestStatus.State expectedState = expectedStates.get(i);
+
+			AnalysisRequest request = createAnalysisRequest(Collections.singletonList(dataSetID));
+
+			log.info("Starting analysis");
+			AnalysisResponse response = analysisManager.runAnalysis(request);
+			Assert.assertNotNull(response);
+			String id = response.getId();
+			Assert.assertFalse(response.isInvalidRequest());
+			Assert.assertNotNull(id);
+			request.setId(id);
+			request2ExpectedState.put(request, expectedState);
+		}
+
+		//		Set<AnalysisRequest> finishedRequests = new HashSet<AnalysisRequest>();
+		Map<AnalysisRequest, AnalysisRequestStatus> actualFinalStatePerRequest = new HashMap<AnalysisRequest, AnalysisRequestStatus>();
+		int maxPollPasses = 10;
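+		// outer loop: up to maxPollPasses passes over all requests; the inner loop below polls each unfinished request a few times per pass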
+		for (int i = 0; i < maxPollPasses; i++) {
+			log.info("Polling all requests, pass " + i);
+			boolean allRequestsFinished = true;
+			for (Map.Entry<AnalysisRequest, AnalysisRequestStatus.State> entry : request2ExpectedState.entrySet()) {
+
+				AnalysisRequest request = entry.getKey();
+				String id = request.getId();
+				if (actualFinalStatePerRequest.containsKey(request)) {
+					log.log(Level.INFO, "Request with ID ''{0}'' already finished, skipping it", id);
+				} else {
+					allRequestsFinished = false;
+
+					AnalysisRequestStatus.State expectedState = entry.getValue();
+
+					AnalysisRequestStatus status = null;
+
+					int maxPollsPerRequest = 3;
+					do {
+						status = analysisManager.getAnalysisRequestStatus(id);
+						log.log(Level.INFO, "Poll request for request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''",
+								new Object[] { id, status.getState(), status.getDetails(), expectedState });
+						maxPollsPerRequest--;
+						Thread.sleep(1000);
+					} while (maxPollsPerRequest > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+
+					if (maxPollsPerRequest > 0) {
+						// final state found
+						actualFinalStatePerRequest.put(request, status);
+						//				Assert.assertEquals(expectedState, status.getState());
+					}
+				}
+			}
+			if (allRequestsFinished) {
+				log.info("All requests finished");
+				break;
+			}
+		}
+		Assert.assertTrue(actualFinalStatePerRequest.size() == request2ExpectedState.size());
+		Assert.assertTrue(actualFinalStatePerRequest.keySet().equals(request2ExpectedState.keySet()));
+		for (Map.Entry<AnalysisRequest, AnalysisRequestStatus> actual : actualFinalStatePerRequest.entrySet()) {
+			AnalysisRequest req = actual.getKey();
+			Assert.assertNotNull(req);
+			AnalysisRequestStatus.State expectedState = request2ExpectedState.get(req);
+			Assert.assertNotNull(expectedState);
+			AnalysisRequestStatus.State actualState = actual.getValue().getState();
+			Assert.assertNotNull(actualState);
+
+			log.log(Level.INFO, "Checking request ID ''{0}'', actual state: ''{1}'', expected state: ''{2}''", new Object[] { req.getId(), actualState, expectedState });
+			Assert.assertEquals(expectedState, actualState);
+		}
+	}
+
+	public static AnalysisRequest createAnalysisRequest(List<String> dataSetIDs) throws JSONException {
+		AnalysisRequest request = new AnalysisRequest();
+		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		if (!(mds instanceof DefaultMetadataStore)) {
+			throw new RuntimeException(MessageFormat.format("This test only works with the DefaultMetadataStore, not with metadata store implementation \"{0}\".", mds.getClass().getName()));
+		}
+		DefaultMetadataStore defaultMds = (DefaultMetadataStore) mds;
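+		// reset the default metadata store so that repeated test runs only see the objects created below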
+		defaultMds.resetAllData();
+		for (String id : dataSetIDs) {
+			MetaDataObjectReference mdr = new MetaDataObjectReference();
+			mdr.setId(id);
+			dataSetRefs.add(mdr);
+			if (id.startsWith(DUMMY_SUCCESS_ID) || id.startsWith(DUMMY_ERROR_ID)) {
+				log.info("Creating dummy data set for reference: " + id);
+				DataSet ds = new UnknownDataSet();
+				ds.setReference(mdr);
+				defaultMds.createObject(ds);
+			}
+		}
+		defaultMds.commit();
+		request.setDataSets(dataSetRefs);
+		List<String> serviceIds = Arrays.asList(new String[]{"asynctestservice", "synctestservice"});
+		/* Use a fixed list of services instead of collecting the registered ones:
+		List<DiscoveryServiceRegistrationInfo> registeredServices = new ODFFactory().create(ControlCenter.class).getConfig().getRegisteredServices();		
+		for(DiscoveryServiceRegistrationInfo service : registeredServices){
+			serviceIds.add(service.getId());
+		}
+		*/
+		request.setDiscoveryServiceSequence(serviceIds);
+		Map<String, Object> additionalProps = new HashMap<String, Object>();
+		additionalProps.put("aaa", "bbb");
+		JSONObject jo = new JSONObject();
+		jo.put("p1", "v1");
+		jo.put("p2", "v2");
+		additionalProps.put("jo", jo);
+		request.setAdditionalProperties(additionalProps);
+		return request;
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java
new file mode 100755
index 0000000..9aa3ba4
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/ParallelODFTest.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.engine.EngineManager;
+import org.apache.atlas.odf.api.engine.SystemHealth;
+import org.apache.atlas.odf.api.engine.SystemHealth.HealthStatus;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.analysis.AnalysisManagerImpl;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+
+public class ParallelODFTest extends ODFTestcase {
+	Logger log = ODFTestLogger.get();
+	
+	@Test
+	public void runDataSetsInParallelSuccess() throws Exception {
+		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "successID2" }), State.FINISHED);
+	}
+
+	@Test 
+	public void runDataSetsInParallelError() throws Exception {
+		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "errorID2" }), State.ERROR);
+	}
+
+	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, State expectedState) throws Exception {
+		log.info("Running data sets in parallel: " + dataSetIDs);
+		log.info("Expected state: " + expectedState);
+		AnalysisRequest req = ODFAPITest.createAnalysisRequest(dataSetIDs);
+		// Enable parallel processing because this is a parallel test
+		req.setProcessDataSetsSequentially(false);
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		EngineManager engineManager = new ODFFactory().create().getEngineManager();
+
+		SystemHealth healthCheckResult = engineManager.checkHealthStatus();
+		Assert.assertEquals(HealthStatus.OK, healthCheckResult.getStatus());
+		AnalysisResponse resp = analysisManager.runAnalysis(req);
+		log.info("Parallel requests started");
+
+		String id = resp.getId();
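+		// a parallel (compound) request answers with a compound ID; split it into the per-data-set request IDs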
+		List<String> singleIds = Utils.splitString(id, AnalysisManagerImpl.COMPOUND_REQUEST_SEPARATOR);
+		List<String> singleDetails = Utils.splitString(resp.getDetails(), AnalysisManagerImpl.COMPOUND_REQUEST_SEPARATOR);
+		Assert.assertEquals(dataSetIDs.size(), singleIds.size());
+		Assert.assertEquals(dataSetIDs.size(), singleDetails.size());
+
+		AnalysisRequestStatus status = null;
+
+		// check that requests are processed in parallel: 
+		//   there must be a point in time where both requests are in status "active"
+		log.info("Polling for status of parallel request...");
+		boolean foundPointInTimeWhereBothRequestsAreActive = false;
+		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
+		do {
+			List<State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
+			for (String singleId : singleIds) {
+				allSingleStates.add(analysisManager.getAnalysisRequestStatus(singleId).getState());
+			}
+			if (Utils.containsOnly(allSingleStates, new State[] { State.ACTIVE })) {
+				foundPointInTimeWhereBothRequestsAreActive = true;
+			}
+
+			status = analysisManager.getAnalysisRequestStatus(id);
+			log.log(Level.INFO, "Poll request for parallel request ID ''{0}'' (expected state: ''{3}''): state: ''{1}'', details: ''{2}''", new Object[] { id, status.getState(), status.getDetails(),
+					expectedState });
+			log.info("States of single requests: " + singleIds + ": " + allSingleStates);
+			maxPolls--;
+			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
+		} while (maxPolls > 0 && (status.getState() == State.ACTIVE || status.getState() == State.QUEUED));
+
+		Assert.assertTrue(maxPolls > 0);
+		Assert.assertEquals(expectedState, status.getState());
+		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
+		log.info("Parallel request status details: " + status.getDetails());
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java
new file mode 100755
index 0000000..9a43b78
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/controlcenter/SetTrackerStatusTest.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.controlcenter;
+
+import java.util.logging.Level;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+
+public class SetTrackerStatusTest extends ODFTestBase {
+
+	@Test
+	public void testSetTrackerStatus() throws Exception {
+		AnalysisManager am = new ODFFactory().create().getAnalysisManager();
+		AnalysisRequestTrackerStore arts = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
+		String requestId = ODFAPITest.runRequest("successId", am);
+		Thread.sleep(1000);
+		long cutOffTimestamp = System.currentTimeMillis();		
+		String testMessage = "Message was set to error at " + cutOffTimestamp;
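+		// set all requests older than the cutoff timestamp (which includes the request started above) to ERROR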
+		arts.setStatusOfOldRequest(cutOffTimestamp, STATUS.ERROR, testMessage);
+		AnalysisRequestTracker tracker = arts.query(requestId);
+		Assert.assertEquals(STATUS.ERROR, tracker.getStatus());
+		Assert.assertEquals(testMessage, tracker.getStatusDetails());
+		
+		// wait until request is finished and state is set back to finished
+		log.log(Level.INFO, "Waiting for request ''{0}'' to finish", requestId);
+		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
+		AnalysisRequestStatus status = null;
+		do {
+			status = am.getAnalysisRequestStatus(requestId);
+			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
+			maxPolls--;
+			try {
+				Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
+			} catch (InterruptedException e) {
+				// ignore the interrupt and keep polling
+				e.printStackTrace();
+			}
+		} while (maxPolls > 0 && (status.getState() != AnalysisRequestStatus.State.FINISHED) );
+		
+		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, am.getAnalysisRequestStatus(requestId).getState());
+		tracker = arts.query(requestId);
+		Assert.assertEquals(STATUS.FINISHED, tracker.getStatus());
+	}
+	
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java
new file mode 100755
index 0000000..5f923a3
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/DiscoveryServiceManagerTest.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.discoveryservice;
+
+import java.io.InputStream;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceJavaEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+
+public class DiscoveryServiceManagerTest {
+	
+	final private static String ASYNCTESTWA_SERVICE_ID = "asynctestservice-with-annotations";
+
+	final private static String NEW_SERVICE_ID = "New_Service";
+	final private static String NEW_SERVICE_NAME = "Name of New Service";
+	final private static String NEW_SERVICE_DESCRIPTION = "Description of the New Service";
+	final private static String NEW_SERVICE_CLASSNAME = "org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryService1";
+	
+	final private static String UPDATED_SERVICE_DESCRIPTION = "Updated description of the New Service";
+	final private static String UPDATED_SERVICE_CLASSNAME = "org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryService1";
+	
+	private void registerDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		discoveryServicesManager.createDiscoveryService(dsProperties);
+	}
+	
+	private void replaceDiscoveryService(DiscoveryServiceProperties dsProperties) throws ValidationException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		discoveryServicesManager.replaceDiscoveryService(dsProperties);
+	}
+	
+	private void unregisterDiscoveryService(String serviceId) throws ServiceNotFoundException, ValidationException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		discoveryServicesManager.deleteDiscoveryService(serviceId);
+	}
+		
+	@Test
+	public void testGetDiscoveryServiceProperties() throws ServiceNotFoundException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		DiscoveryServiceProperties dsProperties = discoveryServicesManager.getDiscoveryServiceProperties(ASYNCTESTWA_SERVICE_ID);
+		Assert.assertNotNull(dsProperties);
+	}
+	
+		
+	@Ignore @Test    // Ignoring testcase due to problem on Mac (issue #56)
+	public void testGetDiscoveryServiceStatus() throws ServiceNotFoundException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		DiscoveryServiceStatus dsStatus = discoveryServicesManager.getDiscoveryServiceStatus(ASYNCTESTWA_SERVICE_ID);
+		Assert.assertNotNull(dsStatus);
+	}
+	
+	@Test  // TODO: need to adjust as soon as runtime statistics are available
+	public void testGetDiscoveryServiceRuntimeStatistics() throws ServiceNotFoundException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		DiscoveryServiceRuntimeStatistics dsRuntimeStats = discoveryServicesManager.getDiscoveryServiceRuntimeStatistics(ASYNCTESTWA_SERVICE_ID);
+		Assert.assertNotNull(dsRuntimeStats);
+		long avgProcTime = dsRuntimeStats.getAverageProcessingTimePerItemInMillis();
+		Assert.assertEquals(0, avgProcTime);
+	}
+
+	@Test
+	public void testDeleteDiscoveryServiceRuntimeStatistics() throws ServiceNotFoundException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		discoveryServicesManager.deleteDiscoveryServiceRuntimeStatistics(ASYNCTESTWA_SERVICE_ID);
+	}
+
+	@Test
+	public void testGetDiscoveryServiceImage() throws ServiceNotFoundException {
+		DiscoveryServiceManager discoveryServicesManager = new ODFFactory().create().getDiscoveryServiceManager();
+		InputStream is = discoveryServicesManager.getDiscoveryServiceImage(ASYNCTESTWA_SERVICE_ID);
+		Assert.assertNull(is);
+	}
+
+	@Test
+	public void testCreateUpdateDelete() throws ServiceNotFoundException, ValidationException, JSONException {
+		DiscoveryServiceJavaEndpoint dse = new DiscoveryServiceJavaEndpoint();
+		dse.setClassName(NEW_SERVICE_CLASSNAME);
+		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
+		dsProperties.setId(NEW_SERVICE_ID);
+		dsProperties.setName(NEW_SERVICE_NAME);
+		dsProperties.setDescription(NEW_SERVICE_DESCRIPTION);
+		dsProperties.setLink(null);
+		dsProperties.setPrerequisiteAnnotationTypes(null);
+		dsProperties.setResultingAnnotationTypes(null);
+		dsProperties.setSupportedObjectTypes(null);
+		dsProperties.setAssignedObjectTypes(null);
+		dsProperties.setAssignedObjectCandidates(null);
+		dsProperties.setEndpoint(JSONUtils.convert(dse, DiscoveryServiceEndpoint.class));
+		dsProperties.setParallelismCount(2);
+		registerDiscoveryService(dsProperties);
+
+		DiscoveryServiceJavaEndpoint dse2 = new DiscoveryServiceJavaEndpoint();
+		dse2.setClassName(UPDATED_SERVICE_CLASSNAME);
+		DiscoveryServiceProperties dsProperties2 = new DiscoveryServiceProperties();
+		dsProperties2.setId(NEW_SERVICE_ID);
+		dsProperties2.setName(NEW_SERVICE_NAME);
+		dsProperties2.setDescription(UPDATED_SERVICE_DESCRIPTION);
+		dsProperties2.setLink(null);
+		dsProperties2.setPrerequisiteAnnotationTypes(null);
+		dsProperties2.setResultingAnnotationTypes(null);
+		dsProperties2.setSupportedObjectTypes(null);
+		dsProperties2.setAssignedObjectTypes(null);
+		dsProperties2.setAssignedObjectCandidates(null);
+		dsProperties2.setEndpoint(JSONUtils.convert(dse2, DiscoveryServiceEndpoint.class));
+		dsProperties2.setParallelismCount(2);
+		replaceDiscoveryService(dsProperties2);
+
+		unregisterDiscoveryService(NEW_SERVICE_ID);
+	}
+	
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java
new file mode 100755
index 0000000..2ea85b7
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryService1.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.discoveryservice;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
+import org.junit.Assert;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class TestAsyncDiscoveryService1 extends DiscoveryServiceBase implements AsyncDiscoveryService {
+
+	static int unavailableCounter = 0;
+
+	static Logger logger = ODFTestLogger.get();
+
+	public static void checkUserAndAdditionalProperties(DiscoveryServiceRequest request) {
+		String user = request.getUser();
+		
+		String defaultUser = System.getProperty("user.name");
+		Assert.assertEquals(defaultUser, user);
+
+		Map<String, Object> additionalProperties = request.getAdditionalProperties();
+		logger.info("TestAsyncDiscoveryService1.startAnalysis additional properties: " + additionalProperties);
+		Assert.assertNotNull(additionalProperties);
+		
+		// check that environment entries are also available as additional properties
+		Environment ev = new ODFInternalFactory().create(Environment.class);
+		String dsId = request.getDiscoveryServiceId();
+		Map<String, String> serviceEnvProps = ev.getPropertiesWithPrefix(dsId);
+		if (!serviceEnvProps.isEmpty()) {
+			Assert.assertTrue(!additionalProperties.isEmpty());
+			for (Map.Entry<String, String> serviceEnvProp : serviceEnvProps.entrySet()) {
+				String key = serviceEnvProp.getKey();
+				String val = serviceEnvProp.getValue();
+				logger.info("Found discoveryservice configuration parameter: " + key + " with value " + val);
+				Assert.assertTrue(key.startsWith(dsId));
+				Assert.assertTrue(additionalProperties.containsKey(key) );
+				Assert.assertEquals(val, additionalProperties.get(key));
+			}
+		}
+		
+		if (!additionalProperties.isEmpty()) {
+			Assert.assertTrue(additionalProperties.containsKey("aaa"));
+			Assert.assertTrue("bbb".equals(additionalProperties.get("aaa")));
+			Assert.assertTrue(additionalProperties.containsKey("jo"));
+			@SuppressWarnings("unchecked")
+			Map<String, Object> m = (Map<String, Object>) additionalProperties.get("jo");
+			Assert.assertTrue("v1".equals(m.get("p1")));
+			Assert.assertTrue("v2".equals(m.get("p2")));
+			/*
+			if (!additionalProperties.containsKey("aaa")) {
+				response.setCode(ResponseCode.UNKNOWN_ERROR);
+				response.setDetails("Additional property value 'aaa' doesn't exist");
+				return;
+			}
+			if (!"bbb".equals(additionalProperties.get("aaa"))) {
+				response.setCode(ResponseCode.UNKNOWN_ERROR);
+				response.setDetails("Additional properties 'aaa' has wrong value");
+				return;
+			}
+			if (!additionalProperties.containsKey("jo")) {
+				response.setCode(ResponseCode.UNKNOWN_ERROR);
+				response.setDetails("Additional property value 'jo' doesn't exist");
+				return;
+			}
+			Map m = (Map) additionalProperties.get("jo");
+			if (!"v1".equals(m.get("p1"))) {
+				response.setCode(ResponseCode.UNKNOWN_ERROR);
+				response.setDetails("Additional property value 'jo.p1' doesn't exist");
+				return;
+
+			}
+			if (!"v2".equals(m.get("p2"))) {
+				response.setCode(ResponseCode.UNKNOWN_ERROR);
+				response.setDetails("Additional property value 'jo.p2' doesn't exist");
+				return;
+			}
+			*/
+		}
+	}
+	
+	@Override
+	public DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request) {
+		try {
+			DiscoveryServiceResponse.ResponseCode code = DiscoveryServiceResponse.ResponseCode.TEMPORARILY_UNAVAILABLE;
+			String details = "Cannot answer right now";
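+			// every other call reports TEMPORARILY_UNAVAILABLE so that handling of temporarily unavailable services is exercised as well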
+			if (unavailableCounter % 2 == 0) {
+				code = DiscoveryServiceResponse.ResponseCode.OK;
+				details = "Everything's peachy";
+			}
+			unavailableCounter++;
+			/*
+			if (unavailableCounter % 3 == 0) {
+				code = CODE.NOT_AUTHORIZED;
+				details = "You have no power here!";
+			}
+			*/
+			DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
+			response.setCode(code);
+			response.setDetails(details);
+			if (code == DiscoveryServiceResponse.ResponseCode.OK) {
+				String runid = "TestAsyncService1" + UUID.randomUUID().toString();
+				synchronized (lock) {
+					runIDsRunning.put(runid, 4); // return status "running" 4 times before finishing
+				}
+				response.setRunId(runid);
+				String dataSetId = request.getDataSetContainer().getDataSet().getReference().getId();
+				if (dataSetId.startsWith("error")) {
+					logger.info("TestAsync Discovery Service run " + runid + " will fail");
+					runIDsWithError.add(runid);
+				} else {
+					logger.info("TestAsync Discovery Service run " + runid + " will succeed");
+				}
+			}
+			logger.info("TestAsyncDiscoveryService1.startAnalysis returns: " + JSONUtils.lazyJSONSerializer(response));
+			checkUserAndAdditionalProperties(request);
+			/*
+			String user = request.getUser();
+			Assert.assertEquals(TestControlCenter.TEST_USER_ID, user);
+
+			Map<String, Object> additionalProperties = request.getAdditionalProperties();
+			logger.info("TestAsyncDiscoveryService1.startAnalysis additional properties: " + additionalProperties);
+			Assert.assertNotNull(additionalProperties);
+			if (!additionalProperties.isEmpty()) {
+				if (!additionalProperties.containsKey("aaa")) {
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Additional property value 'aaa' doesn't exist");
+					return response;
+				}
+				if (!"bbb".equals(additionalProperties.get("aaa"))) {
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Additional properties 'aaa' has wrong value");
+					return response;
+				}
+				if (!additionalProperties.containsKey("jo")) {
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Additional property value 'jo' doesn't exist");
+					return response;
+				}
+				Map m = (Map) additionalProperties.get("jo");
+				if (!"v1".equals(m.get("p1"))) {
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Additional property value 'jo.p1' doesn't exist");
+					return response;
+
+				}
+				if (!"v2".equals(m.get("p2"))) {
+					response.setCode(ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Additional property value 'jo.p2' doesn't exist");
+					return response;
+				}
+			}
+			*/
+			return response;
+		} catch (Throwable t) {
+			DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
+			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+			response.setDetails(Utils.getExceptionAsString(t));
+			return response;
+		}
+	}
+
+	static Object lock = new Object();
+	static Map<String, Integer> runIDsRunning = new HashMap<String, Integer>();
+	static Set<String> runIDsWithError = Collections.synchronizedSet(new HashSet<String>());
+
+	//	static Map<String, Integer> requestIDUnavailable = new HashMap<>();
+
+	@Override
+	public DiscoveryServiceAsyncRunStatus getStatus(String runId) {
+		String details = "Run like the wind";
+		DiscoveryServiceAsyncRunStatus.State state = DiscoveryServiceAsyncRunStatus.State.RUNNING;
+		synchronized (lock) {
+			Integer i = runIDsRunning.get(runId);
+			Assert.assertNotNull(i);
+			if (i.intValue() == 0) {
+				if (runIDsWithError.contains(runId)) {
+					state = DiscoveryServiceAsyncRunStatus.State.ERROR;
+					details = "This was a mistake";
+				} else {
+					state = DiscoveryServiceAsyncRunStatus.State.FINISHED;
+					details = "Finish him!";
+				}
+			} else {
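+				// still running: consume one of the remaining RUNNING status answers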
+				runIDsRunning.put(runId, i - 1);
+			}
+		}
+
+		DiscoveryServiceAsyncRunStatus status = new DiscoveryServiceAsyncRunStatus();
+		status.setRunId(runId);
+		status.setDetails(details);
+		status.setState(state);
+		logger.info("TestAsyncDiscoveryService1.getStatus returns: " + JSONUtils.lazyJSONSerializer(status));
+
+		return status;
+	}
+
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java
new file mode 100755
index 0000000..bd2f1a6
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestAsyncDiscoveryServiceWritingAnnotations1.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.discoveryservice;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.async.AsyncDiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncRunStatus;
+import org.apache.atlas.odf.api.discoveryservice.async.DiscoveryServiceAsyncStartResponse;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class TestAsyncDiscoveryServiceWritingAnnotations1 extends DiscoveryServiceBase implements AsyncDiscoveryService {
+
+	static Logger logger = ODFTestLogger.get();
+
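+	// maps run ID -> worker thread so that getStatus() can derive the run state from the thread state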
+	static Map<String, MyThread> id2Thread = Collections.synchronizedMap(new HashMap<String, MyThread>());
+
+	class MyThread extends Thread {
+
+		String errorMessage = null;
+		String correlationId;
+		MetaDataObjectReference dataSetRef;
+
+		public MyThread(MetaDataObjectReference dataSetRef, String correlationId) {
+			super();
+			this.dataSetRef = dataSetRef;
+			this.correlationId = correlationId;
+		}
+
+		@Override
+		public void run() {
+			this.errorMessage = TestSyncDiscoveryServiceWritingAnnotations1.createAnnotations(dataSetRef, correlationId, metadataStore, annotationStore);
+		}
+
+	}
+
+	@Override
+	public DiscoveryServiceAsyncStartResponse startAnalysis(DiscoveryServiceRequest request) {
+		DiscoveryServiceAsyncStartResponse response = new DiscoveryServiceAsyncStartResponse();
+		MetaDataObjectReference dataSetRef = request.getDataSetContainer().getDataSet().getReference();
+
+		String newRunID = "RunId-" + this.getClass().getSimpleName() + "-" + UUID.randomUUID().toString();
+		MyThread t = new MyThread(dataSetRef, (String) request.getAdditionalProperties().get(TestSyncDiscoveryServiceWritingAnnotations1.REQUEST_PROPERTY_CORRELATION_ID));
+		t.start();
+		id2Thread.put(newRunID, t);
+		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+		response.setRunId(newRunID);
+		response.setDetails("Thread started");
+		logger.info("Analysis writing annotations has started");
+
+		return response;
+	}
+
+	@Override
+	public DiscoveryServiceAsyncRunStatus getStatus(String runId) {
+		DiscoveryServiceAsyncRunStatus status = new DiscoveryServiceAsyncRunStatus();
+
+		MyThread t = id2Thread.get(runId);
+		status.setRunId(runId);
+		if (t == null) {
+			status.setState(DiscoveryServiceAsyncRunStatus.State.NOT_FOUND);
+		} else {
+			java.lang.Thread.State ts = t.getState();
+			if (!ts.equals(java.lang.Thread.State.TERMINATED)) {
+				status.setState(DiscoveryServiceAsyncRunStatus.State.RUNNING);
+			} else {
+				if (t.errorMessage != null) {
+					status.setState(DiscoveryServiceAsyncRunStatus.State.ERROR);
+					status.setDetails(t.errorMessage);
+				} else {
+					status.setState(DiscoveryServiceAsyncRunStatus.State.FINISHED);
+					status.setDetails("All went fine");
+				}
+			}
+		}
+		logger.info("Status of analysis with annotations: " + status.getState() + ", " + status.getDetails());
+		return status;
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java
new file mode 100755
index 0000000..9ea92f3
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryService1.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.discoveryservice;
+
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class TestSyncDiscoveryService1 extends DiscoveryServiceBase implements SyncDiscoveryService {
+	static int unavailableCounter = 0;
+
+	Logger logger = ODFTestLogger.get();
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		try {
+			DiscoveryServiceResponse.ResponseCode code = DiscoveryServiceResponse.ResponseCode.TEMPORARILY_UNAVAILABLE;
+			String details = "Cannot answer right now synchronously";
+			if (unavailableCounter % 2 == 0) {
+				code = DiscoveryServiceResponse.ResponseCode.OK;
+				details = "Everything's peachy and synchronous";
+			}
+			unavailableCounter++;
+			DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+			response.setDetails(details);
+			response.setCode(code);
+			if (code == DiscoveryServiceResponse.ResponseCode.OK) {
+				String dataSetId = request.getDataSetContainer().getDataSet().getReference().getId();
+				if (dataSetId.startsWith("error")) {
+					response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+					response.setDetails("Something went synchronously wrong!");
+				} else {
+					response.setDetails("All is synchronously fine!");
+				}
+				TestAsyncDiscoveryService1.checkUserAndAdditionalProperties(request);
+			}
+			logger.info(this.getClass().getSimpleName() + " service returned with code: " + response.getCode());
+			return response;
+		} catch (Throwable t) {
+			t.printStackTrace();
+			return null;
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java
new file mode 100755
index 0000000..62c7bf6
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/discoveryservice/TestSyncDiscoveryServiceWritingAnnotations1.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.discoveryservice;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.discoveryservice.sync.SyncDiscoveryService;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+
+import org.apache.atlas.odf.api.metadata.models.CachedMetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataSet;
+import org.apache.atlas.odf.api.metadata.models.MetaDataCache;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+
+public class TestSyncDiscoveryServiceWritingAnnotations1 extends DiscoveryServiceBase implements SyncDiscoveryService {
+
+	static Logger logger = Logger.getLogger(TestSyncDiscoveryServiceWritingAnnotations1.class.getName());
+
+	public static String checkMetaDataCache(DiscoveryServiceRequest request) {
+		logger.info("Checking metadata cache");
+		MetaDataObject mdo = request.getDataSetContainer().getDataSet();
+		MetaDataCache cache = request.getDataSetContainer().getMetaDataCache();
+		if (cache == null) {
+			return null;
+		}
+		CachedMetadataStore cacheReader = new CachedMetadataStore(cache);
+
+		if (mdo instanceof RelationalDataSet) {
+			logger.info("Checking metadata cache for columns...");
+			RelationalDataSet rds = (RelationalDataSet) mdo;
+			Set<MetaDataObjectReference> cachedColumns = new HashSet<>();
+			Set<MetaDataObjectReference> actualColumns = new HashSet<>();
+			for (MetaDataObject col : cacheReader.getColumns(rds)) {
+				cachedColumns.add(col.getReference());
+			}
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			for (MetaDataObject col : mds.getColumns(rds)) {
+				actualColumns.add(col.getReference());
+			}
+			Assert.assertTrue("Columns missing from metadata cache.", cachedColumns.containsAll(actualColumns));
+			Assert.assertTrue("Too many columns in metadata cache.", actualColumns.containsAll(cachedColumns));
+		}
+		return null;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		logger.info("Analysis started on sync test service with annotations");
+		String errorMessage = createAnnotations( //
+				request.getDataSetContainer().getDataSet().getReference(), //
+				(String) request.getAdditionalProperties().get(REQUEST_PROPERTY_CORRELATION_ID), //
+				metadataStore, //
+				annotationStore);
+		if (errorMessage == null) {
+			errorMessage = checkMetaDataCache(request);
+		}
+		DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
+		if (errorMessage == null) {
+			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+			resp.setDetails("Annotations created successfully");
+		} else {
+			resp.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+			resp.setDetails(errorMessage);
+		}
+		logger.info("Analysis finished on sync test service with annotations");
+
+		return resp;
+	}
+
+	public static final String REQUEST_PROPERTY_CORRELATION_ID = "REQUEST_PROPERTY_CORRELATION_ID";
+
+	static final String ANNOTATION_TYPE = "AnnotationType-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
+	static final String JSON_ATTRIBUTE = "Attribute-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
+	static final String JSON_VALUE = "Value-" + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName();
+
+	public static int getNumberOfAnnotations() {
+		return 3;
+	}
+
+	public static String[] getPropsOfNthAnnotation(int i) {
+		return new String[] { ANNOTATION_TYPE + i, JSON_ATTRIBUTE + i, JSON_VALUE + i };
+	}
+
+	public static String createAnnotations(MetaDataObjectReference dataSetRef, String correlationId, MetadataStore mds, AnnotationStore as) {
+		try {
+			TestSyncDiscoveryServiceWritingAnnotations1.logger.info("Analysis will run on data set ref: " + dataSetRef);
+			MetaDataObject dataSet = mds.retrieve(dataSetRef);
+
+			String errorMessage = null;
+			if (dataSet == null) {
+				errorMessage = "Data set with id " + dataSetRef + " could not be retrieved";
+				TestSyncDiscoveryServiceWritingAnnotations1.logger.severe(errorMessage);
+				return errorMessage;
+			}
+
+			if (!(dataSet instanceof DataSet)) {
+				errorMessage = "Object with id " + dataSetRef + " is not a data set";
+				TestSyncDiscoveryServiceWritingAnnotations1.logger.severe(errorMessage);
+				return errorMessage;
+			}
+
+			// add some annotations
+			for (int i = 0; i < getNumberOfAnnotations(); i++) {
+				String[] annotValues = getPropsOfNthAnnotation(i);
+				ProfilingAnnotation annotation1 = new ProfilingAnnotation();
+				annotation1.setProfiledObject(dataSetRef);
+				annotation1.setAnnotationType(annotValues[0]);
+				JSONObject jo1 = new JSONObject();
+				jo1.put(annotValues[1], annotValues[2]);
+				jo1.put(REQUEST_PROPERTY_CORRELATION_ID, correlationId);
+				annotation1.setJsonProperties(jo1.write());
+
+// PG: dynamic type creation disabled (types are already created statically)
+//				mds.createAnnotationTypesFromPrototypes(Collections.singletonList(annotation1));
+				MetaDataObjectReference resultRef1 = as.store(annotation1);
+				if (resultRef1 == null) {
+					throw new RuntimeException("Annotation object " + i + " could not be created");
+				}
+			}
+
+			TestSyncDiscoveryServiceWritingAnnotations1.logger.info("Discovery service " + TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName() + " created annotations successfully");
+		} catch (Throwable exc) {
+			exc.printStackTrace();
+			TestSyncDiscoveryServiceWritingAnnotations1.logger.log(Level.WARNING, TestSyncDiscoveryServiceWritingAnnotations1.class.getSimpleName() + " has failed", exc);
+			return "Failed: " + Utils.getExceptionAsString(exc);
+		}
+		return null;
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java
new file mode 100755
index 0000000..2e6d012
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ODFVersionTest.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.engine;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.engine.ODFVersion;
+import org.apache.atlas.odf.core.test.TimerTestBase;
+
+public class ODFVersionTest extends TimerTestBase {
+	@Test
+	public void testVersion() {
+		ODFVersion version = new ODFFactory().create().getEngineManager().getVersion();
+		Assert.assertNotNull(version);
+		Assert.assertTrue(version.getVersion().startsWith("1.2.0-"));
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java
new file mode 100755
index 0000000..465eb5c
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/engine/ShutdownTest.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.engine;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.engine.EngineManager;
+import org.apache.atlas.odf.api.engine.ODFEngineOptions;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
+
+public class ShutdownTest extends ODFTestBase {
+
+	private void runAndTestThreads() throws Exception {
+		ODFAPITest.runRequestAndCheckResult("successID", State.FINISHED, -1);
+		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
+		int numThreads = tm.getNumberOfRunningThreads();
+		log.info("--- Number of running threads: " + numThreads);
+		Assert.assertTrue(numThreads >= 3);		
+	}
+
+	@Test
+	public void testShutdown() throws Exception {
+
+		log.info("--- Running some request before shutdown...");
+		runAndTestThreads();
+
+		ThreadManager tm = new ODFInternalFactory().create(ThreadManager.class);
+		log.info("--- Number of threads before shutdown: " + tm.getNumberOfRunningThreads());
+
+		EngineManager engineManager = new ODFFactory().create().getEngineManager();
+		ODFEngineOptions options = new ODFEngineOptions();
+		options.setRestart(false);
+		int numThreads = tm.getNumberOfRunningThreads();
+		log.info("--- Number of threads before restart: " + numThreads);
+
+		engineManager.shutdown(options);
+		log.info("--- Shutdown requested...");
+		int maxWait = 60;
+		int waitCnt = 0;
+		log.info("--- Shutdown requested, waiting for max " + maxWait + " seconds");
+		while (tm.getNumberOfRunningThreads() > 0 && waitCnt < maxWait) {
+			waitCnt++;
+			Thread.sleep(1000);
+		}
+		log.info("--- Shutdown should be done by now, waited " + waitCnt + " seconds; remaining threads: " + tm.getNumberOfRunningThreads());
+		Assert.assertNotEquals(maxWait, waitCnt);
+
+	//	log.info("--- Starting ODF again....");
+
+	//	ODFInitializer.start();
+		log.info("--- Rerunning request after shutdown...");
+		runAndTestThreads();
+
+		int nrOfThreads = tm.getNumberOfRunningThreads();
+		options.setRestart(true);
+		engineManager.shutdown(options);
+		maxWait = nrOfThreads * 2;
+		waitCnt = 0;
+		log.info("--- Restart requested..., wait for a maximum of " + (maxWait * 1000) + " ms");
+		while (tm.getNumberOfRunningThreads() > 0 && waitCnt < maxWait) {
+			waitCnt++;
+			Thread.sleep(1000);
+		}
+		log.info("--- Restart should be done by now");
+		Thread.sleep(5000);
+		numThreads = tm.getNumberOfRunningThreads();
+		log.info("--- Number of threads after restart: " + numThreads);
+		Assert.assertTrue(numThreads > 2);
+		log.info("--- testShutdown finished");
+
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java
new file mode 100755
index 0000000..c2be180
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/messaging/MockQueueManager.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.engine.MessagingStatus;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage;
+import org.apache.atlas.odf.core.controlcenter.AdminQueueProcessor;
+import org.apache.atlas.odf.core.controlcenter.ConfigChangeQueueProcessor;
+import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
+import org.apache.atlas.odf.core.controlcenter.DiscoveryServiceStarter;
+import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
+import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
+import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class MockQueueManager implements DiscoveryServiceQueueManager {
+
+	static Logger logger = Logger.getLogger(MockQueueManager.class.getName());
+
+	static Object lock = new Object();
+
+	static List<AdminMessage> adminQueue = Collections.synchronizedList(new ArrayList<AdminMessage>());
+	static List<StatusQueueEntry> statusQueue = Collections.synchronizedList(new ArrayList<StatusQueueEntry>());
+	static Map<String, List<AnalysisRequestTracker>> runtimeQueues = new HashMap<>();
+
+	ThreadManager threadManager;
+
+	public MockQueueManager() {
+		ODFInternalFactory factory = new ODFInternalFactory();
+		ExecutorServiceFactory esf = factory.create(ExecutorServiceFactory.class);
+		threadManager = factory.create(ThreadManager.class);
+		threadManager.setExecutorService(esf.createExecutorService());
+		//initialize();
+	}
+
+	@Override
+	public void start() throws TimeoutException {
+		logger.info("Initializing MockQueueManager");
+		List<ThreadStartupResult> threads = new ArrayList<ThreadStartupResult>();
+		ThreadStartupResult startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKADMIN", createQueueListener("Admin", adminQueue, new AdminQueueProcessor(), false));
+		boolean threadCreated = startUnmanagedThread.isNewThreadCreated();
+		threads.add(startUnmanagedThread);
+		startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKADMINCONFIGCHANGE",
+				createQueueListener("AdminConfig", adminQueue, new ConfigChangeQueueProcessor(), false));
+		threadCreated |= startUnmanagedThread.isNewThreadCreated();
+		threads.add(startUnmanagedThread);
+		startUnmanagedThread = this.threadManager.startUnmanagedThread("MOCKSTATUSSTORE",
+				createQueueListener("StatusStore", statusQueue, new DefaultStatusQueueStore.StatusQueueProcessor(), true));
+		threadCreated |= startUnmanagedThread.isNewThreadCreated();
+		threads.add(startUnmanagedThread);
+
+		logger.info("New thread created: " + threadCreated);
+		if (threadCreated) {
+			try {
+				this.threadManager.waitForThreadsToBeReady(5000, threads);
+				logger.info("All threads ready");
+			} catch (TimeoutException e) {
+				final String message = "Not all threads became ready in time";
+				logger.warning(message);
+			}
+		}
+	}
+
+	@Override
+	public void stop() {
+		threadManager.shutdownThreads(Arrays.asList("MOCKADMIN", "MOCKADMINCONFIGCHANGE", "MOCKSTATUSSTORE"));
+	}
+
+	<T> T cloneObject(T obj) {
+		try {
+			return JSONUtils.cloneJSONObject(obj);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Override
+	public void enqueue(AnalysisRequestTracker tracker) {
+		tracker = cloneObject(tracker);
+		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+		if (dsRequest == null) {
+			throw new RuntimeException("Tracker is finished, should not be enqueued");
+		}
+		String dsID = dsRequest.getDiscoveryServiceId();
+		dsRequest.setPutOnRequestQueue(System.currentTimeMillis());
+		synchronized (lock) {
+			ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(dsID);
+			if (runtime == null) {
+				throw new RuntimeException(MessageFormat.format("Runtime of discovery service ''{0}'' does not exist", dsID));
+			}
+			String runtimeName = runtime.getName();
+			List<AnalysisRequestTracker> mq = runtimeQueues.get(runtimeName);
+			if (mq == null) {
+				mq = Collections.synchronizedList(new ArrayList<AnalysisRequestTracker>());
+				runtimeQueues.put(runtimeName, mq);
+			}
+			boolean started = this.threadManager.startUnmanagedThread("MOCK" + runtimeName, createQueueListener("Starter" + runtimeName, mq, new DiscoveryServiceStarter(), false))
+					.isNewThreadCreated();
+			logger.info("New thread created for runtime " + runtimeName + ", started: " + started + ", current queue length: " + mq.size());
+			mq.add(tracker);
+		}
+	}
+
+	static class MockQueueListener implements ODFRunnable {
+		String name; 
+		QueueMessageProcessor processor;
+		List<?> queue;
+		boolean cancelled = false;
+		ExecutorService service;
+		int index = 0;
+
+		public MockQueueListener(String name, List<?> q, QueueMessageProcessor qmp, boolean fromBeginning) {
+			this.name = name;
+			this.processor = qmp;
+			this.queue = q;
+			if (fromBeginning) {
+				index = 0;
+			} else {
+				index = q.size();
+			}
+		}
+
+		long WAITTIMEMS = 100;
+
+		boolean isValidIndex() {
+			return index >= 0 && index < queue.size();
+		}
+
+		@Override
+		public void run() {
+			logger.info("MockQueueManager thread " + name + " started");
+
+			while (!cancelled) {
+			//	logger.info("Queue consumer " + name + ": checking index " + index + " on queue of size " + queue.size());
+				if (!isValidIndex()) {
+					try {
+						Thread.sleep(WAITTIMEMS);
+					} catch (InterruptedException e) {
+						e.printStackTrace();
+					}
+				} else {
+					Object obj = queue.get(index);
+					String msg;
+					try {
+						msg = JSONUtils.toJSON(obj);
+					} catch (JSONException e) {
+						e.printStackTrace();
+						cancelled = true;
+						return;
+					}
+					this.processor.process(service, msg, 0, index);
+					logger.finest("MockQConsumer " + name + ": Processed message: " + msg);
+					index++;
+				}
+			}
+			logger.info("MockQueueManager thread " + name + " finished");
+
+		}
+
+
+		@Override
+		public void setExecutorService(ExecutorService service) {
+			this.service = service;
+		}
+
+		@Override
+		public void cancel() {
+			cancelled = true;
+		}
+
+		@Override
+		public boolean isReady() {
+			return true;
+		}
+
+	}
+
+	ODFRunnable createQueueListener(String name, List<?> queue, QueueMessageProcessor qmp, boolean fromBeginning) {
+		return new MockQueueListener(name, queue, qmp, fromBeginning);
+	}
+
+	@Override
+	public void enqueueInStatusQueue(StatusQueueEntry sqe) {
+		sqe = cloneObject(sqe);
+		statusQueue.add(sqe);
+	}
+
+	@Override
+	public void enqueueInAdminQueue(AdminMessage message) {
+		message = cloneObject(message);
+		adminQueue.add(message);
+	}
+
+	public static class MockMessagingStatus extends MessagingStatus {
+		String message;
+
+		public String getMessage() {
+			return message;
+		}
+
+		public void setMessage(String message) {
+			this.message = message;
+		}
+
+	}
+
+	@Override
+	public MessagingStatus getMessagingStatus() {
+		MockMessagingStatus mms = new MockMessagingStatus();
+		mms.setMessage("OK");
+		return mms;
+	}
+
+}
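MockQueueManager stands in for the Kafka-based queue manager via the odf-implementation.properties mapping further below. A minimal sketch of how test code would obtain and start it, following the ODFInternalFactory pattern used throughout these tests (the wrapper class is hypothetical):

    import java.util.concurrent.TimeoutException;

    import org.apache.atlas.odf.core.ODFInternalFactory;
    import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;

    public class MockQueueManagerSketch {
        public static void main(String[] args) throws TimeoutException {
            // Resolves to MockQueueManager when the test odf-implementation.properties is on the classpath.
            DiscoveryServiceQueueManager qm = new ODFInternalFactory().create(DiscoveryServiceQueueManager.class);
            qm.start(); // spins up the MOCKADMIN, MOCKADMINCONFIGCHANGE and MOCKSTATUSSTORE listener threads
            qm.stop();  // shuts those listener threads down again
        }
    }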
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java
new file mode 100755
index 0000000..f69513c
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/NotificationManagerTest.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.notification;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.notification.NotificationListener;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.notification.NotificationManager;
+import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
+
+public class NotificationManagerTest extends ODFTestBase {
+
+	@Test
+	public void testNotifications() throws Exception {
+		NotificationManager nm = new ODFInternalFactory().create(NotificationManager.class);
+		Assert.assertNotNull(nm);
+		log.info("Notification manager found " + nm.getClass().getName());
+		Assert.assertTrue(nm instanceof TestNotificationManager);
+		List<NotificationListener> listeners = nm.getListeners();
+		Assert.assertTrue(listeners.size() > 0);
+
+		OpenDiscoveryFramework odf = new ODFFactory().create();
+		List<String> dataSetIDs = Collections.singletonList("successID");
+		String id = ODFAPITest.runRequest(dataSetIDs, odf.getAnalysisManager());
+		ODFAPITest.waitForRequest(id, odf.getAnalysisManager());
+
+		int polls = 20;
+		boolean found = false;
+		boolean foundFinished = false;
+		do {
+			// now check that trackers were found through the notification mechanism
+			log.info("Checking that trackers were consumed, " + polls + " seconds left");
+			List<AnalysisRequestTracker> trackers = new ArrayList<>(TestNotificationManager.receivedTrackers);
+			log.info("Received trackers: " + trackers.size());
+			for (AnalysisRequestTracker tracker : trackers) {
+				String foundId = tracker.getRequest().getId();
+				if (foundId.equals(id)) {
+					found = true;
+					if (tracker.getStatus().equals(STATUS.FINISHED)) {
+						foundFinished = true;
+					}
+				}
+			}
+			polls--;
+			Thread.sleep(1000);
+		} while ((!found || !foundFinished) && polls > 0);
+		Assert.assertTrue(found);
+		Assert.assertTrue(foundFinished);
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java
new file mode 100755
index 0000000..80252d6
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/notification/TestNotificationManager.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.notification;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
+import org.apache.atlas.odf.core.notification.NotificationListener;
+import org.apache.atlas.odf.core.notification.NotificationManager;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+public class TestNotificationManager implements NotificationManager {
+
+	public static class TestListener1 implements NotificationListener {
+
+		@Override
+		public String getTopicName() {
+			return "odf-status-topic";
+		}
+
+		@Override
+		public void onEvent(String event, OpenDiscoveryFramework odf) {
+			try {
+				StatusQueueEntry sqe = JSONUtils.fromJSON(event, StatusQueueEntry.class);
+				AnalysisRequestTracker tracker = sqe.getAnalysisRequestTracker();
+				if (tracker != null) {
+					receivedTrackers.add(tracker);					
+				}
+			} catch (JSONException e) {
+				throw new RuntimeException(e);
+			}
+		}
+
+		@Override
+		public String getName() {
+			return this.getClass().getName();
+		}
+
+	}
+
+	public static List<AnalysisRequestTracker> receivedTrackers = Collections.synchronizedList(new ArrayList<AnalysisRequestTracker>());
+
+	@Override
+	public List<NotificationListener> getListeners() {
+		List<NotificationListener> result = new ArrayList<>();
+		result.add(new TestListener1());
+		return result;
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java
new file mode 100755
index 0000000..8a8d9a8
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/RuntimeExtensionTest.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.runtime;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
+
+public class RuntimeExtensionTest extends ODFTestBase {
+
+	static final String SERVICE_ON_TEST_RUNTIME = "testruntimeservice";
+
+	List<String> getNames(List<ServiceRuntime> rts) {
+		List<String> result = new ArrayList<>();
+		for (ServiceRuntime rt : rts) {
+			result.add(rt.getName());
+		}
+		return result;
+	}
+
+	@Test
+	public void testActiveRuntimes() {
+		List<String> allNames = getNames(ServiceRuntimes.getAllRuntimes());
+		Assert.assertTrue(allNames.contains(TestServiceRuntime.TESTSERVICERUNTIME_NAME));
+
+		List<String> activeNames = getNames(ServiceRuntimes.getActiveRuntimes());
+		Assert.assertTrue(activeNames.contains(TestServiceRuntime.TESTSERVICERUNTIME_NAME));
+	}
+
+	@Test
+	public void testRuntimeForNewService() {
+		ServiceRuntime rt = ServiceRuntimes.getRuntimeForDiscoveryService(SERVICE_ON_TEST_RUNTIME);
+		Assert.assertNotNull(rt);
+		Assert.assertEquals(TestServiceRuntime.TESTSERVICERUNTIME_NAME, rt.getName());
+	}
+
+	static Object lock = new Object();
+
+	@Test
+	public void testRuntimeExtensionSimple() throws Exception {
+		synchronized (lock) {
+			OpenDiscoveryFramework odf = new ODFFactory().create();
+			TestServiceRuntime.runtimeBlocked = false;
+			AnalysisRequest request = ODFAPITest.createAnalysisRequest(Collections.singletonList(ODFAPITest.DUMMY_SUCCESS_ID));
+			request.setDiscoveryServiceSequence(Collections.singletonList(SERVICE_ON_TEST_RUNTIME));
+			log.info("Starting service for test runtime");
+			AnalysisResponse resp = odf.getAnalysisManager().runAnalysis(request);
+			String requestId = resp.getId();
+			Assert.assertTrue(ODFAPITest.waitForRequest(requestId, odf.getAnalysisManager(), 40, State.FINISHED));
+			Assert.assertTrue(TestServiceRuntime.requests.contains(requestId));
+			log.info("testRuntimeExtensionSimple finished");
+
+			// block runtime again to restore state before testcase
+			TestServiceRuntime.runtimeBlocked = true;
+			Thread.sleep(5000);
+		}
+	}
+
+	@Test
+	public void testBlockedRuntimeExtension() throws Exception {
+		synchronized (lock) {
+			OpenDiscoveryFramework odf = new ODFFactory().create();
+			TestServiceRuntime.runtimeBlocked = true;
+			AnalysisRequest request = ODFAPITest.createAnalysisRequest(Collections.singletonList(ODFAPITest.DUMMY_SUCCESS_ID));
+			request.setDiscoveryServiceSequence(Collections.singletonList(SERVICE_ON_TEST_RUNTIME));
+			log.info("Starting service for test runtime");
+			AnalysisResponse resp = odf.getAnalysisManager().runAnalysis(request);
+			String requestId = resp.getId();
+			Assert.assertFalse(resp.isInvalidRequest());
+			log.info("Checking that service is not called");
+			for (int i = 0; i < 5; i++) {
+				Assert.assertFalse(TestServiceRuntime.requests.contains(requestId));
+				Thread.sleep(1000);
+			}
+			log.info("Unblocking runtime...");
+			TestServiceRuntime.runtimeBlocked = false;
+			Thread.sleep(5000); // give service time to start consumption
+			log.info("Checking that request has finished");
+			Assert.assertTrue(ODFAPITest.waitForRequest(requestId, odf.getAnalysisManager(), 40, State.FINISHED));
+			log.info("Checking that service was called");
+			Assert.assertTrue(TestServiceRuntime.requests.contains(requestId));
+			log.info("testBlockedRuntimeExtension finished");
+			
+			// block runtime again to restore state before testcase
+			TestServiceRuntime.runtimeBlocked = true;
+			Thread.sleep(5000);
+		}
+	}
+
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java
new file mode 100755
index 0000000..d16e10a
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/runtime/TestServiceRuntime.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.runtime;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryService;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+
+public class TestServiceRuntime implements ServiceRuntime {
+
+	static Logger logger = ODFTestLogger.get();
+
+	public static final String TESTSERVICERUNTIME_NAME = "TestServiceRuntime";
+	
+	public static boolean runtimeBlocked = true;
+
+	@Override
+	public String getName() {
+		return TESTSERVICERUNTIME_NAME;
+	}
+
+	@Override
+	public long getWaitTimeUntilAvailable() {
+		if (runtimeBlocked) {
+			return 1000;
+		}
+		return 0;
+	}
+
+	public static Set<String> requests = new HashSet<>();
+
+	public static class DSProxy extends SyncDiscoveryServiceBase {
+
+		@Override
+		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+			logger.info("Running test runtime service");
+			requests.add(request.getOdfRequestId());
+			DiscoveryServiceSyncResponse resp = new DiscoveryServiceSyncResponse();
+			resp.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+			resp.setDetails("Test success");
+			return resp;
+		}
+	}
+
+	@Override
+	public DiscoveryService createDiscoveryServiceProxy(DiscoveryServiceProperties props) {
+		return new DSProxy();
+	}
+
+	@Override
+	public String getDescription() {
+		return "TestServiceRuntime description";
+	}
+
+	@Override
+	public void validate(DiscoveryServiceProperties props) throws ValidationException {
+	}
+
+}
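The blocking contract exercised by RuntimeExtensionTest above, in one sketch: a positive getWaitTimeUntilAvailable() return value tells the control center to retry after that many milliseconds, while 0 marks the runtime as available for consumption (the wrapper class is hypothetical):

    import org.apache.atlas.odf.core.test.runtime.TestServiceRuntime;

    public class RuntimeToggleSketch {
        public static void main(String[] args) {
            TestServiceRuntime.runtimeBlocked = true;  // getWaitTimeUntilAvailable() returns 1000 -> requests stay queued
            // ... an analysis request submitted here would not be consumed ...
            TestServiceRuntime.runtimeBlocked = false; // returns 0 -> queued requests are picked up and run
        }
    }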
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java
new file mode 100755
index 0000000..30848bd
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/MockSparkServiceExecutor.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.spark;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
+public class MockSparkServiceExecutor implements SparkServiceExecutor {
+	Logger logger = Logger.getLogger(MockSparkServiceExecutor.class.getName());
+
+	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer) {
+		DataSetCheckResult checkResult = new DataSetCheckResult();
+		checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
+		return checkResult;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request) {
+		logger.log(Level.INFO, "Starting Spark mock application.");
+		DiscoveryServiceSparkEndpoint sparkEndpoint;
+		try {
+			sparkEndpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+		if (sparkEndpoint.getJar() == null) {
+			throw new RuntimeException("Spark application is not set in Spark endpoint.");
+		}
+		logger.log(Level.INFO, "Application name is {0}.", sparkEndpoint.getJar());
+		logger.log(Level.INFO, "Spark application finished.");
+		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+		response.setDetails("Discovery service completed successfully.");
+		return response;
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java
new file mode 100755
index 0000000..661cfe2
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/spark/SimpleSparkDiscoveryServiceTest.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.spark;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Level;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.core.metadata.DefaultMetadataStore;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+
+public class SimpleSparkDiscoveryServiceTest extends ODFTestBase {
+
+	public static int WAIT_MS_BETWEEN_POLLING = 500;
+	public static int MAX_NUMBER_OF_POLLS = 500;
+	
+	@Test
+	public void testSparkService() throws Exception {
+		log.info("Running request");
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+		AnalysisRequest request = new AnalysisRequest();
+		List<MetaDataObjectReference> dataSetRefs = new ArrayList<>();
+		MetadataStore mds = new ODFFactory().create().getMetadataStore();
+		if (!(mds instanceof DefaultMetadataStore)) {
+			throw new RuntimeException(MessageFormat.format("This test does not work with metadata store implementation \"{0}\" but only with the DefaultMetadataStore.", mds.getClass().getName()));
+		}
+		DefaultMetadataStore defaultMds = (DefaultMetadataStore) mds;
+		defaultMds.resetAllData();
+		RelationalDataSet dataSet = new DataFile();
+		MetaDataObjectReference ref = new MetaDataObjectReference();
+		ref.setId("datafile-mock");
+		dataSet.setReference(ref);
+		defaultMds.createObject(dataSet);
+		defaultMds.commit();
+		dataSetRefs.add(dataSet.getReference());
+		request.setDataSets(dataSetRefs);
+		List<String> serviceIds = Arrays.asList(new String[]{"spark-service-test"});
+		request.setDiscoveryServiceSequence(serviceIds);
+
+		log.info("Starting analysis");
+		AnalysisResponse response = analysisManager.runAnalysis(request);
+		Assert.assertNotNull(response);
+		String requestId = response.getId();
+		Assert.assertNotNull(requestId);
+		log.info("Request id is " + requestId + ".");
+
+		log.info("Waiting for request to finish");
+		AnalysisRequestStatus status = null;
+		int maxPolls = MAX_NUMBER_OF_POLLS;
+		do {
+			status = analysisManager.getAnalysisRequestStatus(requestId);
+			log.log(Level.INFO, "Poll request for request ID ''{0}'', state: ''{1}'', details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails() });
+			maxPolls--;
+			try {
+				Thread.sleep(WAIT_MS_BETWEEN_POLLING);
+			} catch (InterruptedException e) {
+				log.log(Level.INFO, "Exception thrown: ", e);
+			}
+		} while (maxPolls > 0 && (status.getState() == AnalysisRequestStatus.State.ACTIVE || status.getState() == AnalysisRequestStatus.State.QUEUED || status.getState() == AnalysisRequestStatus.State.NOT_FOUND));
+		if (maxPolls == 0) {
+			log.log(Level.INFO, "Request ''{0}'' did not finish within the polling limit, giving up waiting", requestId);
+		}
+		Assert.assertEquals(AnalysisRequestStatus.State.FINISHED, status.getState());
+		log.log(Level.INFO, "Request ''{0}'' is finished.", requestId);
+	}
+}
diff --git a/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java
new file mode 100755
index 0000000..191d337
--- /dev/null
+++ b/odf/odf-core/src/test/java/org/apache/atlas/odf/core/test/store/MockConfigurationStorage.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.store;
+
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class MockConfigurationStorage implements ODFConfigurationStorage {
+
+	static JSONObject config;
+
+	static {
+		try {
+			config = new JSONObject(MockConfigurationStorage.class.getClassLoader().getResourceAsStream("org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json"));
+		} catch (JSONException e) {
+			e.printStackTrace();
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Override
+	public void storeConfig(ConfigContainer container) {
+		try {
+			config = JSONUtils.toJSONObject(container);
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Override
+	public ConfigContainer getConfig(ConfigContainer defaultConfig) {
+		try {
+			return JSONUtils.fromJSON(config.write(), ConfigContainer.class);
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new RuntimeException(e);
+		}
+	}
+
+	@Override
+	public void onConfigChange(ConfigContainer container) {
+		// do nothing
+	}
+
+	@Override
+	public void addPendingConfigChange(String changeId) {
+		// do nothing
+	}
+
+	@Override
+	public void removePendingConfigChange(String changeId) {
+		// do nothing
+	}
+
+	@Override
+	public boolean isConfigChangePending(String changeId) {
+		return false;
+	}
+
+}
diff --git a/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt b/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt
new file mode 100755
index 0000000..63a0bb3
--- /dev/null
+++ b/odf/odf-core/src/test/resources/META-INF/odf/odf-runtimes.txt
@@ -0,0 +1,14 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+org.apache.atlas.odf.core.test.runtime.TestServiceRuntime
diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json
new file mode 100755
index 0000000..34dbf78
--- /dev/null
+++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/integrationtest/metadata/internal/atlas/nested_annotation_example.json
@@ -0,0 +1,111 @@
+{  
+   "prop1":"mystring",
+   "prop2":999,
+   "prop3":999.999,
+   "obj1":{  
+      "prop1":"mystring",
+      "prop2":999,
+      "prop3":999.999
+   },
+   "arr1":[  
+      {  
+         "prop1":"mystring",
+         "prop2":999,
+         "prop3":999.999
+      }
+   ],
+   "obj2":{  
+      "prop1":"mystring",
+      "prop2":999,
+      "prop3":999.999,
+      "nobj21":{  
+         "prop1":"mystring",
+         "prop2":999,
+         "prop3":999.999,
+         "nnarr211":[  
+            {  
+               "prop1":"mystring",
+               "prop2":999,
+               "prop3":999.999
+            }
+         ]
+      },
+      "narr21":[  
+         {  
+            "prop1":"mystring",
+            "prop2":999,
+            "prop3":999.999,
+            "nnarr211":[  
+               {  
+                  "prop1":"mystring",
+                  "prop2":999,
+                  "prop3":999.999
+               }
+            ]
+         }
+      ]
+   },
+   "obj3":{  
+      "prop1":"mystring",
+      "prop2":999,
+      "prop3":999.999,
+      "nobj31":{  
+         "prop1":"mystring",
+         "prop2":999,
+         "prop3":999.999,
+         "nnobj31":{  
+            "prop1":"mystring",
+            "prop2":999,
+            "prop3":999.999
+         }
+      },
+      "narr31":[  
+         {  
+            "prop1":"mystring",
+            "prop2":999,
+            "prop3":999.999,
+            "nnarr311":[  
+               {  
+                  "prop1":"mystring",
+                  "prop2":999,
+                  "prop3":999.999,
+                  "nnnarr3111":[  
+                     {  
+                        "prop1":"mystring",
+                        "prop2":999,
+                        "prop3":999.999
+                     }
+                  ]
+               }
+            ]
+         }
+      ]
+   },
+   "obj4":{  
+      "prop1":"mystring",
+      "prop2":999,
+      "prop3":999.999,
+      "nobj41":{  
+         "prop1":"mystring",
+         "prop2":999,
+         "prop3":999.999,
+         "nobj411":{  
+            "prop1":"mystring",
+            "prop2":999,
+            "prop3":999.999,
+            "nnnarr4111":[  
+               {  
+                  "prop1":"mystring",
+                  "prop2":999,
+                  "prop3":999.999,
+                  "nnobj41111":{  
+                     "prop1":"mystring",
+                     "prop2":999,
+                     "prop3":999.999
+                  }
+               }
+            ]
+         }
+      }
+   }
+}
diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json
new file mode 100755
index 0000000..146748d
--- /dev/null
+++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/annotation/annotexttest1.json
@@ -0,0 +1,8 @@
+{
+    "javaClass": "aHopefullyUnknownClass",
+	"profiledObject": null,
+	"annotationType": "MySubType",
+	"analysisRun": "bla",
+	"newProp1": "newProp1Value",
+	"newProp2": 4237
+}
diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json
new file mode 100755
index 0000000..c7e365f
--- /dev/null
+++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/internal/odf-initial-configuration.json
@@ -0,0 +1,114 @@
+{
+	"odf" : {
+		"instanceId" : "odf-default-id-CHANGEME",
+		"odfUrl" : "https://localhost:58081/odf-web-1.2.0-SNAPSHOT",
+		"odfUser" : "odf",
+		"odfPassword" : "ZzTeX3hKtVORgks+2TaLPWxerucPBoxK",
+		"runNewServicesOnRegistration": false,
+		"runAnalysisOnImport": false,
+		"reuseRequests": true,
+		"discoveryServiceWatcherWaitMs": 2000,
+		"enableAnnotationPropagation": true,
+		"messagingConfiguration": {
+			"type": "org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration",
+			"analysisRequestRetentionMs": 86400000,
+			"queueConsumerWaitMs": 2000,
+			"kafkaBrokerTopicReplication": 1,
+			"kafkaConsumerConfig": {
+				"offsetsStorage": "kafka",
+				"zookeeperSessionTimeoutMs": 400,
+				"zookeeperConnectionTimeoutMs": 6000
+			}
+		},
+		"userDefined": {
+		}
+	},
+	"registeredServices": [{
+			"id": "asynctestservice",
+			"name": "Async test",
+			"description": "The async test service",
+			"resultingAnnotationTypes": [
+				"AsyncTestDummyAnnotation"
+			],
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryService1"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "asynctestservice-with-annotations",
+			"name": "Async test including metadata access",
+			"description": "The async test service writing annotations",
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.discoveryservice.TestAsyncDiscoveryServiceWritingAnnotations1"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "synctestservice",
+			"name": "Sync test",
+			"description": "The Sync test service",
+			"resultingAnnotationTypes": [
+				"SyncTestDummyAnnotation"
+			],
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryService1"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "synctestservice-with-annotations",
+			"name": "Sync test with annotations",
+			"description": "The Sync test service writing annotations",
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.discoveryservice.TestSyncDiscoveryServiceWritingAnnotations1"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "synctestservice-with-extendedannotations",
+			"name": "Sync test with extended annotations",
+			"description": "The Sync test service writing annotations with extension mechanism",
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingExtendedAnnotations"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "synctestservice-with-json-annotations",
+			"name": "Sync test with json annotations",
+			"description": "The Sync test service writing annotations returned from a json file",
+			"endpoint": {
+				"runtimeName": "Java",
+				"className": "org.apache.atlas.odf.core.test.annotation.TestSyncDiscoveryServiceWritingJsonAnnotations"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "spark-service-test",
+			"name": "Simple Spark mock test",
+			"description": "The Spark test is calling a mock version of the SparkAppExecutor",
+			"endpoint": {
+				"runtimeName": "Spark",
+				"inputMethod": "DataFrame",
+				"jar": "my-example-application-jar",
+				"className": "my-example-class-name"
+			},
+			"parallelismCount" : 2
+		},
+		{
+			"id": "testruntimeservice",
+			"name": "Runtime test service",
+			"description": "Runtime test service description",
+			"endpoint": {
+				"runtimeName": "TestServiceRuntime"
+			}
+		}
+		
+	]
+}
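The tests above address these registered services by id when composing a request, e.g. "spark-service-test" in SimpleSparkDiscoveryServiceTest. A minimal sketch of that selection (setter names as used in the tests above; the wrapper class is hypothetical):

    import java.util.Collections;

    import org.apache.atlas.odf.api.analysis.AnalysisRequest;

    public class ServiceSelectionSketch {
        public static void main(String[] args) {
            AnalysisRequest request = new AnalysisRequest();
            // "spark-service-test" is one of the service ids registered in the configuration above.
            request.setDiscoveryServiceSequence(Collections.singletonList("spark-service-test"));
        }
    }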
diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json
new file mode 100755
index 0000000..be29e1e
--- /dev/null
+++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json
@@ -0,0 +1,31 @@
+{
+	"user": "isadmin",
+	"lastModified": "1443795291000",
+	"discoveryServiceRequests": [{
+		"dataSetContainer": {
+			"dataSet": {
+				"javaClass": "org.apache.atlas.odf.core.metadata.models.Document",
+				"name": "someDocument",
+				"reference": {
+					"id": "testdataset"
+				}
+			}
+		},
+		"discoveryServiceId": "testservice"
+	}],
+	"nextDiscoveryServiceRequest": 1,
+	"request": {
+		"dataSets": [{
+			"id": "testdataset"
+		}],
+		"id": "testid"
+	},
+	"status": "FINISHED",
+	"statusDetails": "All discovery services run successfully",
+	"discoveryServiceResponses": [{
+		"type": "async",
+		"runId": "IARUNID6f49fdfd-89ce-4d46-9067-b3a4db4698ba",
+		"details": "IA has run successfully",
+		"code": "OK"
+	}]
+}
diff --git a/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..63c84cb
--- /dev/null
+++ b/odf/odf-core/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,20 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+## USE for TESTs only
+
+
+org.apache.atlas.odf.core.store.ODFConfigurationStorage=org.apache.atlas.odf.core.test.store.MockConfigurationStorage
+org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager=org.apache.atlas.odf.core.test.messaging.MockQueueManager
+org.apache.atlas.odf.api.spark.SparkServiceExecutor=org.apache.atlas.odf.core.test.spark.MockSparkServiceExecutor
+org.apache.atlas.odf.core.notification.NotificationManager=org.apache.atlas.odf.core.test.notification.TestNotificationManager
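Each line maps an ODF interface to its test double; ODFInternalFactory resolves these mappings when creating instances, which is exactly what NotificationManagerTest above asserts. A minimal sketch of the effect (hypothetical wrapper class; the factory call and instanceof check mirror NotificationManagerTest):

    import org.apache.atlas.odf.core.ODFInternalFactory;
    import org.apache.atlas.odf.core.notification.NotificationManager;
    import org.apache.atlas.odf.core.test.notification.TestNotificationManager;

    public class ImplementationMappingSketch {
        public static void main(String[] args) {
            NotificationManager nm = new ODFInternalFactory().create(NotificationManager.class);
            // Holds when the mapping above is active on the test classpath.
            System.out.println(nm instanceof TestNotificationManager); // prints "true"
        }
    }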
diff --git a/odf/odf-doc/.gitignore b/odf/odf-doc/.gitignore
new file mode 100755
index 0000000..8b22f9d
--- /dev/null
+++ b/odf/odf-doc/.gitignore
@@ -0,0 +1,19 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+target
+.settings
+.classpath
+.project
+.factorypath
+.DS_Store
diff --git a/odf/odf-doc/README.txt b/odf/odf-doc/README.txt
new file mode 100755
index 0000000..80dbb61
--- /dev/null
+++ b/odf/odf-doc/README.txt
@@ -0,0 +1,3 @@
+The documentation project is based on the Maven Site Plugin and Maven Doxia. The resulting war file is merged into the war file of the sdp-web project using the overlay mechanism of the Maven War Plugin. The generated documentation is then available through the getting started page of the SDP web console.
+
+Edit the src/site/markdown/*.md files to update the documentation. The structure of the web site can be changed in the file src/site/site.xml.
diff --git a/odf/odf-doc/pom.xml b/odf/odf-doc/pom.xml
new file mode 100755
index 0000000..6ebffcf
--- /dev/null
+++ b/odf/odf-doc/pom.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-doc</artifactId>
+	<packaging>war</packaging>
+	<dependencies>
+		<dependency>
+			<groupId>javax.ws.rs</groupId>
+			<artifactId>jsr311-api</artifactId>
+			<version>1.1.1</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<!-- The following dependencies are required by Spark Discovery Services only and are provided by the Spark cluster -->
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-core_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-sql_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+	</dependencies>
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-war-plugin</artifactId>
+				<version>2.6</version>
+				<configuration>
+					<webResources>
+						<resource>
+							<directory>${project.build.directory}/site</directory>
+							<targetPath>/doc</targetPath>
+						</resource>
+					</webResources>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-javadoc-plugin</artifactId>
+				<version>2.10.3</version>
+				<configuration>
+					<sourcepath>${basedir}/../odf-api/src/main/java</sourcepath>
+					<outputDirectory>${project.build.directory}/doc</outputDirectory>
+					<excludePackageNames>org.apache.atlas.odf.core.metadata.atlas:org.apache.atlas.odf.core.metadata.importer:org.apache.atlas.odf.core.metadata.internal:org.apache.atlas.odf.json</excludePackageNames>
+				</configuration>
+				<executions>
+					<execution>
+						<id>generate-javadocs</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>javadoc</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-site-plugin</artifactId>
+				<version>3.3</version>
+				<configuration>
+					<port>9000</port>
+					<tempWebappDirectory>${basedir}/target/site/tempdir</tempWebappDirectory>
+					<generateProjectInfo>false</generateProjectInfo>
+					<generateReports>false</generateReports>
+					<inputEncoding>UTF-8</inputEncoding>
+					<outputEncoding>UTF-8</outputEncoding>
+				</configuration>
+				<executions>
+					<execution>
+						<id>generate-html</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>site</goal>
+						</goals>
+					</execution>
+				</executions>
+				<dependencies>
+					<dependency>
+						<groupId>org.apache.maven.doxia</groupId>
+						<artifactId>doxia-module-markdown</artifactId>
+						<version>1.3</version>
+					</dependency>
+				</dependencies>
+			</plugin>
+			<!--  this section compiles the tutorial project to check if the code is valid.
+			 -->
+			 <!--
+			<plugin>
+				<artifactId>maven-invoker-plugin</artifactId>
+				<version>2.0.0</version>
+				<configuration>
+					<projectsDirectory>src/site/resources/tutorial-projects</projectsDirectory>
+					<cloneProjectsTo>${project.build.directory}/tutorial-projects-build</cloneProjectsTo>
+				</configuration>
+				<executions>
+					<execution>
+						<id>compile-tutorial-projects</id>
+						<goals>
+							<goal>run</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			 -->
+		</plugins>
+	</build>
+	<profiles>
+		<profile>
+			<!--  Turn off additional checks for maven-javadoc-plugin that will cause build errors when using Java 8 -->
+			<!--  See http://stackoverflow.com/questions/22528767/how-to-work-around-the-stricter-java-8-javadoc-when-using-maven -->
+			<id>disable-java8-doclint</id>
+			<activation>
+				<jdk>[1.8,)</jdk>
+			</activation>
+			<properties>
+				<additionalparam>-Xdoclint:none</additionalparam>
+			</properties>
+		</profile>
+	</profiles>
+</project>
diff --git a/odf/odf-doc/src/main/webapp/WEB-INF/web.xml b/odf/odf-doc/src/main/webapp/WEB-INF/web.xml
new file mode 100755
index 0000000..104b5e4
--- /dev/null
+++ b/odf/odf-doc/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,19 @@
+<!--
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<!DOCTYPE web-app PUBLIC
+ "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
+ "http://java.sun.com/dtd/web-app_2_3.dtd" >
+<web-app>
+  <display-name>odf-doc</display-name>
+</web-app>
diff --git a/odf/odf-doc/src/site/markdown/api-reference.md b/odf/odf-doc/src/site/markdown/api-reference.md
new file mode 100755
index 0000000..c03ada2
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/api-reference.md
@@ -0,0 +1,23 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# API reference
+
+[General ODF API reference](../swagger).
+
+[Java Docs for ODF services](./apidocs/index.html)
diff --git a/odf/odf-doc/src/site/markdown/build.md b/odf/odf-doc/src/site/markdown/build.md
new file mode 100755
index 0000000..8571973
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/build.md
@@ -0,0 +1,107 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Build
+
+This page describes how to build ODF.  
+
+## Prerequisites
+
+You need git, Maven, and Python 2.7 (not 3!) available on the command line.
+If you run these commands and see similar output, you should be all set:
+
+	$ mvn -v
+	Apache Maven 3.3.9 (bb52d8502b132ec0a5a3f4c09453c07478323dc5; 2015-11-10T17:41:47+01:00)
+	...
+
+	$ python -V
+	Python 2.7.10
+
+	$ git --version
+	git version 2.7.4
+
+### Additional Prerequisites on Windows
+
+- For the build: The directory C:\tmp needs to exist
+- For the tests and the test environment to run properly: The `HADOOP_HOME` environment variable must be set to a location where the Hadoop [winutils.exe](http://public-repo-1.hortonworks.com/hdp-win-alpha/winutils.exe) file is available in the bin folder. For example, if the environment variable is set to `HADOOP_HOME=c:\hadoop`, the file needs to be available at `c:\hadoop\bin\winutils.exe`.
+- In your Maven install directory, go to bin and copy mvn.cmd to mvn.bat (see the sketch after this list).
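+
+For example, on a Windows command prompt this could look as follows (a sketch: the `c:\hadoop` location and the Maven install path `c:\apache-maven` are assumptions, adjust them to your setup):
+
+	rem make winutils.exe available at %HADOOP_HOME%\bin\winutils.exe
+	setx HADOOP_HOME c:\hadoop
+	rem provide mvn.bat alongside mvn.cmd
+	copy c:\apache-maven\bin\mvn.cmd c:\apache-maven\bin\mvn.bat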
+
+## Building
+
+To build, clone the repository and perform a maven build in the toplevel directory. These commands should do the trick:
+
+	git clone https://github.com/Analytics/open-discovery-framework.git
+	cd open-discovery-framework
+	mvn clean install
+
+Add the `-Dreduced-build` option to build and test only the core components and services of ODF:
+
+	mvn clean install -Dreduced-build
+
+## Fast build without tests or with reduced tests
+
+To build without running tests, run Maven with the following options (the second one prevents the test Atlas instance from being started and stopped):
+
+	mvn clean install -DskipTests -Duse.running.atlas
+
+Use the `-Dreduced-tests` option to run only a reduced set of tests:
+
+	mvn clean install -Dreduced-tests
+
+This will skip all integration tests (i.e. all tests that involve Atlas) and also some of the long running tests. The option may be combined with the `-Dreduced-build` option introduced above.
+
+## Building the test environment
+
+You can build a test environment that contains Atlas,
+Kafka, and Jetty by running these commands:
+
+	cd odf-test-env
+	mvn package
+
+This will create a zip file with the standalone test environment under
+``odf-test-env/target/odf-test-env-0.1.0-SNAPSHOT-bin.zip``.
+See the contents of this zip file or the [documentation section on the test environment](test-env.html)
+for details.
+
+Congrats! You have just built ODF.
+This should be enough to get you going. See below for additional information
+on different aspects of the build.
+
+## Additional Information
+
+### Working with Eclipse
+
+To build with Eclipse you must have the maven m2e plugin and EGit installed (e.g., search for "m2e maven integration for eclipse" and "egit", respectively, on the Eclipse marketplace).
+
+- Clone the repository into some directory as above, e.g., /home/code/odf.
+- Open Eclipse with a workspace in a different directory.
+- Go to File -> Import -> Maven -> Existing Maven projects.
+- Enter /home/code/odf as the root directory.
+- Select all projects and click Finish.
+- Internally, Eclipse will now perform Maven builds but you can work with the code as usual.
+
+If you want to build via Run configurations, be aware that this will not work with the embedded
+Maven provided by the m2e plugin. Instead you will have to do this:
+
+- Open Windows -> Preferences -> Maven -> Installations
+- Add a new installation pointing to your external Maven installation
+- For each run configuration you use, select the new installation in the Maven runtime dropdown
+(you might also have to set JAVA_HOME in the environment tab).
+
diff --git a/odf/odf-doc/src/site/markdown/configuration.md b/odf/odf-doc/src/site/markdown/configuration.md
new file mode 100755
index 0000000..4a61bb2
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/configuration.md
@@ -0,0 +1,19 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Configuration
diff --git a/odf/odf-doc/src/site/markdown/data-model.md b/odf/odf-doc/src/site/markdown/data-model.md
new file mode 100755
index 0000000..4629abf
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/data-model.md
@@ -0,0 +1,124 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Data Model
+
+This section describes the basic data model of how results of discovery services are
+stored and how new discovery services can extend and enrich this model.
+
+See the section [ODF Metadata API](odf-metadata-api.html) for general information
+on how to retrieve metadata.
+
+You can find the current Atlas data model in the file
+
+	odf-core/src/main/resources/org/apache/atlas/odf/core/metadata/internal/atlas/atlas-odf-model.json
+
+which contains JSON that can be POSTed to the Atlas `types` REST resource to create those types.
+
+## Annotations
+
+All discovery services results are called "annotations". An annotation is an object
+that annotates another object with a certain piece of information.
+For instance, you could have a `DataClassAnnotation` which has a reference attribute `classifiedObject` linking it to
+a column `NAME` of a table `CUSTREC`, and a list of reference attributes `classifyingObjects` linking it to business terms.
+An additional attribute `confidences` might provide a list of numeric confidence values indicating the "strength" of the
+relationship between the classifiedObject and the respective classifyingObject (business term).
+For column `NAME`, the list of classifying objects may have a single entry `Customer Name`, and the list of confidence values
+may have a single value `0.7`.
+This annotation expresses the fact that the term classification services registered and active in ODF have come up with a
+70% confidence that CUSTREC.NAME represents a customer name.
+
+Technically, an annotation is a subtype of one of the three *base types* which are subtypes of the (abstract)
+Atlas type `Annotation`:
+
+- `ProfilingAnnotation`
+- `ClassificationAnnotation`
+- `RelationshipAnnotation`
+
+
+A `ProfilingAnnotation` assigns non-reference attributes to an object. It has the following non-reference attributes:
+
+- `annotationType`: The type of annotation. A JSON string of the form
+   `{"stdType": "DataQualityAnnotation",`
+   ` "runtime": "JRE",`
+   ` "spec" : "org.apache.atlas.myservice.MyAnnotation"`
+   `}`
+   where `stdType` is a base or standardized type name (see below for standardized types), `runtime` names the runtime,
+   and `spec` is a runtime-specific string which helps the runtime deal with instances of this type. In case of a Java
+   runtime, the `spec` is the name of the implementing class, which may be a subclass of the `stdType`.
+- `analysisRun`: A string that is set to the request Id of the analysis that created it
+(compare the swagger documentation of the REST resource `analyses`, e.g. [here](https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT/swagger/#/analyses)).
+*Internal Note*: will be replaced with RID
+- `summary`: A human-readable string that presents a short summary of the annotation.
+Might be used in generic UIs for displaying unknown annotations.
+*Internal note*: deprecated
+- `jsonProperties`: A string attribute where you can store arbitrary JSON as a string.
+Can be used to 'extend' standard annotations.
+
+...and a single referencing attribute:
+
+- `profiledObject`: The object that is annotated by this annotation. In the example above,
+this would point to the Column object.
+
+
+A `ClassificationAnnotation` assigns any number (including 0) of metadata objects to an object.
+It has the same non-reference attributes as `ProfilingAnnotation` plus the following reference attributes:
+
+- `classifiedObject`: The object that is annotated by this annotation.
+- `classifyingObjects`: List of references to metadata objects classifying the classifiedObject.
+
+A `RelationshipAnnotation` expresses a relationship between metadata objects.
+It has the same non-reference attributes as `ProfilingAnnotation` plus a single reference attribute:
+
+- `relatedObjects`: List of references to related metadata objects.
+
+
+Note that annotations are implemented as proper Atlas object types and not traits (labels) for these reasons:
+
+- Annotations of the same type but from different discovery services should be able to co-exist, for instance,
+to be able to compare results of different services downstream.
+This is only partly possible with traits.
+- Relationships between objects cannot easily be modeled with traits.
+
+A discovery service can deliver its results in a base, standardized, or *custom annotation type*. Depending on the type of the
+underlying relationship a custom annotation type might be a subtype of `ProfilingAnnotation` (asymmetric, single reference attribute),
+`ClassificationAnnotation` (asymmetric, any number of reference attributes), or `RelationshipAnnotation` (symmetric, any number
+of reference attributes). A custom annotation type can have additional non-reference attributes that are stored in its `jsonProperties`.
+
+When implemented in Java, the class defining a custom annotation has private fields and corresponding getter/setter methods
+representing the additional information.
+
+
+## Example
+
+
+For instance, creating a new annotation of type `org.apache.atlas.oli.MyAnnotation` could look like this:
+
+	public class MyAnnotation extends ClassificationAnnotation {
+	   String myNewAttribute;
+
+	   public String getMyNewAttribute() {
+	      return myNewAttribute;
+	   }
+
+	   public void setMyNewAttribute(String myNewAttribute) {
+	      this.myNewAttribute = myNewAttribute;
+	   }
+	}
+
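+A service could then populate such an annotation before returning it. A minimal sketch, assuming bean-style setters derived from the reference attributes described above (`setClassifiedObject` and `setClassifyingObjects` are assumptions following that convention; `columnReference` and `termReference` stand for references obtained from the metadata store):
+
+	MyAnnotation annotation = new MyAnnotation();
+	// the object being classified, e.g. a reference to column NAME
+	annotation.setClassifiedObject(columnReference);
+	// the classifying objects, e.g. a reference to the business term "Customer Name"
+	annotation.setClassifyingObjects(Collections.singletonList(termReference));
+	annotation.setMyNewAttribute("some value");
+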
+Annotations can be mapped into standardized meta data objects by a *propagator* which implements the `AnnotationPropagator` interface.
diff --git a/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md b/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md
new file mode 100755
index 0000000..381e5dc
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/discovery-service-tutorial.md
@@ -0,0 +1,161 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Tutorial: Build and run your first Discovery Service
+This tutorial shows how you can create your first discovery service in Java that analyzes a data set and creates a single annotation of a new type.
+This tutorial requires that you have [Maven](http://maven.apache.org/) installed.
+
+
+## Create a discovery service
+Follow these steps to create and package a Java implementation of the simplest discovery service.
+
+#### Step 1: Create ODF discovery service maven project
+Create a new Java Maven project from the ODF provided archetype ``odf-archetype-discoveryservice`` (group ID ``org.apache.atlas.odf``).
+Choose the following values for the respective parameters:
+
+| Parameter | Value                    |
+|-----------|--------------------------|
+|groupId    | odftutorials             |
+|artifactId | discoveryservicetutorial |
+|version    | 0.1                      |
+
+
+From the command line, your command may look like this:
+
+	mvn archetype:generate -DarchetypeGroupId=org.apache.atlas.odf -DarchetypeArtifactId=odf-archetype-discoveryservice -DarchetypeVersion=0.1.0-SNAPSHOT -DgroupId=odftutorials -DartifactId=discoveryservicetutorial -Dversion=0.1
+
+This will create a new Maven project with a pom that has dependencies on ODF.
+It will also create two Java classes ``MyDiscoveryService`` and ``MyAnnotation``
+that you may want to use as a basis for the following steps.
+
+If you use Eclipse to create your project, be sure to enable the checkbox "Include snapshot archetypes" in the
+archetype selection page of the New Maven Project wizard.
+
+If you are not interested in the actual code at this point, you may skip Steps 2 through 4 and go directly
+to step 5.
+
+#### Step 2 (optional): Check the discovery service implementation class
+The project contains a new Java class named ``odftutorials.MyDiscoveryService`` that inherits from `org.apache.atlas.odf.core.discoveryservice.SyncDiscoveryServiceBase`.
+As the name indicates, our service will be synchronous, i.e., it will have a simple method ``runAnalysis()`` that returns
+the analysis result. For the implementation of long-running, asynchronous services, see TODO.
+The archetype creation has already filled in some code here that we will use. Your class
+should look something like this:
+
+	public class MyDiscoveryService extends SyncDiscoveryServiceBase {
+
+		@Override
+		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+			// 1. create an annotation that annotates the data set object passed in the request
+			MyAnnotation annotation = new MyAnnotation();
+			annotation.setAnnotatedObject(request.getDataSetContainer().getDataSet().getReference());
+			// set a new property called "tutorialProperty" to some string
+			annotation.setMyProperty("My property was created on " + new Date());
+
+			// 2. create a response with our annotation created above
+			return createSyncResponse( //
+						ResponseCode.OK, // Everything works OK
+						"Everything worked", // human-readable message
+						Collections.singletonList(annotation) // new annotations
+			);
+		}
+	}
+
+What does the code do?
+The code basically consists of two parts:
+
+1. Create a new ``MyAnnotation`` object and annotate the data set that is passed into
+the discovery service with it.
+2. Create the discovery service response with the new annotation and return it.
+
+
+#### Step 3 (optional): Check the new annotation class
+The project also contains a new Java class called ``odftutorials.MyAnnotation``
+which extends the class ``org.apache.atlas.odf.core.metadata.ProfilingAnnotation``.
+It is a new annotation type that contains a property called ``myProperty`` of type ``String``.
+In the code you can see that there is a Java-Bean style getter and a setter method, i.e., ``getMyProperty()`` and
+``setMyProperty(String value)``.
+
+	public class MyAnnotation extends ProfilingAnnotation {
+
+		private String myProperty;
+
+		public String getMyProperty() {
+			return myProperty;
+		}
+
+		public void setMyProperty(String myValue) {
+			this.myProperty = myValue;
+		}
+	}
+
+When we return these annotations, ODF will take care that these annotations are stored
+appropriately in the metadata store.
+
+
+#### Step 4 (optional): Check the discovery service descriptor
+Lastly, the project contains a file called ``META-INF/odf/odf-services.json``
+in the ``main/resources`` folder. This file, which must always have this exact
+name, contains a JSON list of the descriptions of all services defined in the project.
+The descriptions contain an ID, a name, and a short human-readable description, together
+with the name of the Java class implementing the service. Here is what it looks like:
+
+	[
+	  {
+		"id": "odftutorials.discoveryservicetutorial.MyDiscoveryService",
+		"name": "My service",
+		"description": "My service creates my annotation for a data set",
+		"type": "Java",
+		"endpoint": "odftutorials.MyDiscoveryService"
+	  }
+	]
+
+Note that most of this information can be changed, but ``type`` (this is a Java implementation)
+and ``endpoint`` (the Java class is called ``odftutorials.MyDiscoveryService``)
+should remain as they are.
+
+#### Step 5: Build the service JAR
+The service jar is a standard jar file so you can build it with a standard Maven command like
+
+	mvn clean install
+
+You can find the output jar as per the Maven convention in
+``target/discoveryservicetutorial-0.1.jar``
+
+
+## Deploy the discovery service
+Once you've built your service JAR as described in the previous section, there are two ways
+to deploy it.
+
+### Classpath deployment
+The simplest way to make an ODF instance pick up your new service is to add the service JAR (and
+any dependent JARs) to the ODF classpath. A simple way to do this is to package the JARs into the
+ODF war file. Once you (re-)start ODF, your new service should be available.
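+
+For example, assuming the JDK ``jar`` tool is on the path and the file names match the ones used in this tutorial (both are assumptions, adapt them to your setup), the service JAR could be added to the war like this:
+
+	mkdir -p WEB-INF/lib
+	cp target/discoveryservicetutorial-0.1.jar WEB-INF/lib/
+	jar uf odf-web-0.1.0-SNAPSHOT.war WEB-INF/lib/discoveryservicetutorial-0.1.jar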
+
+## Run the discovery service
+
+Perform these steps to run your new service and inspect the results.
+
+1. Go to the Analysis tab in the ODF console
+2. Select the Data Sets tab and click on Start Analysis next to any data set
+3. Select "My Service" as the discovery service and click Submit.
+4. Select the Requests tab and click Refresh
+5. You should see a new entry showing the data set and the "My Service" discovery service.
+6. Click on Annotations. A new page will open showing the Atlas UI with the new
+annotation that was created.
+7. Click on the annotation and check the value of the "myProperty" property. It should contain
+a value like ``My property was created on Mon Feb 01 18:31:51 CET 2016``.
diff --git a/odf/odf-doc/src/site/markdown/discovery-services.md b/odf/odf-doc/src/site/markdown/discovery-services.md
new file mode 100755
index 0000000..acdc259
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/discovery-services.md
@@ -0,0 +1,19 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Discovery services
diff --git a/odf/odf-doc/src/site/markdown/examples.md b/odf/odf-doc/src/site/markdown/examples.md
new file mode 100755
index 0000000..c1a0bdc
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/examples.md
@@ -0,0 +1,19 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Examples
diff --git a/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md b/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md
new file mode 100755
index 0000000..75e2666
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/first-analysis-tutorial.md
@@ -0,0 +1,21 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Run your first ODF analysis
+
+See the [First steps](first-steps.html) section for details on how to run the analysis from the ODF console UI.
diff --git a/odf/odf-doc/src/site/markdown/first-steps.md b/odf/odf-doc/src/site/markdown/first-steps.md
new file mode 100755
index 0000000..7284c6f
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/first-steps.md
@@ -0,0 +1,81 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# First Steps
+
+This section assumes that you have ODF installed, either the
+[test environment](test-env.html) or [manually](install.html).
+
+
+### ODF Console UI
+
+To open the ODF console point your browser to the ODF web application.
+In the [test environment](test-env.html), this is typically
+[https://localhost:58081/odf-web-0.1.0-SNAPSHOT](https://localhost:58081/odf-web-0.1.0-SNAPSHOT).
+
+*Note*: The links to the ODF Console in the instructions below only work if you view this documentation
+from the ODF web application.
+
+The default user of the ODF Console is odf / admin4odf.
+
+
+#### Check System health
+Go to the [System monitor](/odf-web-0.1.0-SNAPSHOT/#monitor) tab and
+click "Check health". After a while you should see a message
+that the health check was successful. If this check fails, the dependent services might
+not be fully up and running yet (this typically happens right after starting the test environment), so wait a short while
+and try again.
+
+#### Discovery Services
+Take a look at all available discovery services on the tab [Discovery Services](/odf-web-0.1.0-SNAPSHOT/#discoveryServices).
+
+#### Configure Atlas repository and create sample metadata
+
+To change the URL of your Atlas installation and to create some sample data, go
+to the [Configuration](/odf-web-0.1.0-SNAPSHOT/#configuration) tab.
+In general, it is a good idea to change the default URL to Atlas from "localhost" to a hostname that is accessible
+from your network. If you don't do this, you might experience some strange effects when viewing
+Atlas annotations from the web app.
+If you changed the name, click Save.
+
+Create a set of simple sample data by clicking on Create Atlas Sample Data.
+
+To explore the sample data go to the [Data Sets](/odf-web-0.1.0-SNAPSHOT/#data) tab.
+
+#### Run analysis
+
+The easiest way to start an analysis is from the [Data Sets](/odf-web-0.1.0-SNAPSHOT/#data) tab.
+In the "Data Files" section look for the sample table "BankClientsShort".
+To view the details of the table click on it anywhere in the row. The Details dialog
+shows you information about this data set. Click Close to close the dialog.
+
+To start an analysis on the "BankClientsShort" table click "Start Analysis" on the right.
+In the "New Analysis Request" dialog click on "&lt;Select a Service&gt;" to add a service to
+the sequence of discovery service to run on the data set. Then click "Submit" to start the analysis.
+
+To check the status of your request go to the
+[Analysis](/odf-web-0.1.0-SNAPSHOT/#analysis) tab and click Refresh.
+If all went well the status is "Finished".
+Click on "View Results" to view all annotations created for this analysis request.
+
+
+### REST API
+See the [REST API documentation](/odf-web-0.1.0-SNAPSHOT/swagger) for more details on how to
+perform the actions explained above with the REST API.
+In particular, have a look at the ``analysis`` REST resource for APIs to start and
+monitor analysis requests.
diff --git a/odf/odf-doc/src/site/markdown/index.md b/odf/odf-doc/src/site/markdown/index.md
new file mode 100755
index 0000000..780f13c
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/index.md
@@ -0,0 +1,28 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Overview
+
+The "Open Discovery Framework" is an open metadata-based framework that strives to be a common home for different analytics technologies that discover characteristics of data sets and relationships between them (think "AppStore for discovery algorithms"). Using ODF, applications can leverage new discovery algorithms and their results with minimal integration effort.
+
+Automated characterization of information and automated discovery of relationships are key to several Analytics Platform initiatives, e.g. enabling and improving self-service for knowledge workers.
+The Open Discovery Framework provides infrastructure based on open source technology to easily execute, manage, and integrate diverse metadata discovery algorithms provided by internal
+or external (open source) contributors in a single point of access. These discovery algorithms store their analysis results in a common open metadata
+repository that promotes reuse and sharing of these results.
+A simple plug-in mechanism to integrate discovery services enables users of ODF to easily combine and orchestrate algorithms built on different technologies,
+thereby gaining deeper insights into their data.
diff --git a/odf/odf-doc/src/site/markdown/install.md b/odf/odf-doc/src/site/markdown/install.md
new file mode 100755
index 0000000..a8ca26f
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/install.md
@@ -0,0 +1,153 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Install ODF manually
+
+This section describes how to manually install ODF and its prerequisites.
+
+## Install ODF locally
+
+ODF is installed on an application server like Jetty. Its prerequisites (Kafka and Atlas)
+can run on separate machines; they simply must be reachable over the network.
+
+ODF's configuration is stored in Zookeeper, which is a prerequisite of Kafka.
+
+
+### Prerequisites
+
+ODF has two prerequisites:
+
+1. Apache Atlas (only tested with 0.6, but there is no hard dependency on that version)
+2. Apache Kafka 0.8.2.1 which, in turn, requires Zookeeper 3.4
+
+#### Apache Atlas
+
+[Apache Atlas](http://atlas.incubator.apache.org/) is an open
+metadata infrastructure that is currently in incubator status.
+There is currently no binary download available; you have to [build it yourself](http://atlas.incubator.apache.org/InstallationSteps.html).
+Alternatively, you can download a version that we built [here](https://ibm.box.com/shared/static/of1tdea7465iaen8ywt7l1h761j0fplt.zip).
+
+After you have built the distribution, simply unpack the tar ball and run
+``bin/atlas_start.py``. Atlas is started on port 21443 by default, so point
+your browser to [https://localhost:21443](https://localhost:21443) to look at the
+Atlas UI.
+Note that starting Atlas can take up to a minute.
+
+To stop, run ``bin/atlas_stop.py``.
+
+See the Atlas section in the [Troubleshooting guide](troubleshooting.html)
+for common issues and how to work around them.
+
+#### Apache Kafka
+
+[Apache Kafka](http://kafka.apache.org/) is an open source project that implements
+a messaging infrastructure. ODF uses Kafka for notifications and queueing up requests to
+discovery services.
+
+To install Kafka, download the version 0.8.2.1 with Scala 2.10 (which is the version we used in
+our tests) from the Kafka website, see [here](https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.10-0.8.2.1.tgz).
+
+After unpacking the tar ball these steps should get you going:
+
+1. CD to the distribution directory.
+2. Start zookeeper first by running ``bin/zookeeper-server-start.sh config/zookeeper.properties``.
+3. Start Kafka: ``bin/kafka-server-start.sh config/server.properties``
+
+By default, Zookeeper is running on port 2181 and Kafka runs on port 9092. You can change the Zookeeper
+port by changing the properties ``clientPort`` in ``config/zookeeper.properties`` and
+``zookeeper.connect`` in ``config/server.properties``. Change the Kafka port by changing
+``port`` in ``config/server.properties``.
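+
+For example, to make Zookeeper listen on port 52181 (the ODF default mentioned below), the following entries would have to match (a sketch based on the stock configuration files):
+
+	# config/zookeeper.properties
+	clientPort=52181
+
+	# config/server.properties
+	zookeeper.connect=localhost:52181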
+
+For Windows, run the respective .bat commands in the ``bin\windows`` directory.
+
+
+### Deploy ODF
+
+The only ODF artifact you need for deployment is the war file built by the odf-web maven project, which can typically
+be found here:
+
+	odf-web/target/odf-web-0.1.0-SNAPSHOT.war
+
+To tell ODF which Zookeeper / Kafka to use you will need to set the
+Java system property ``odf.zookeeper.connect`` to point
+to the Zookeeper host and port. The value is typically the same string as the ``zookeeper.connect`` property
+in the Kafka installation ``config/server.properties`` file:
+
+	-Dodf.zookeeper.connect=zkserver.example.org:2181
+
+Note that if this property is not set, the default is ``localhost:52181``.
+
+
+#### Application Server
+
+ODF should run on any application server. As of now we have done most of our testing on Jetty.
+
+##### Jetty
+
+[Jetty](https://eclipse.org/jetty/) is an open source web and application server.
+We have used version 9.2.x for our testing (the most current one that supports Java 7).
+Download it from the web site [https://eclipse.org/jetty/](https://eclipse.org/jetty/).
+
+Here are some quick start instructions for creating a new Jetty base. Compare
+the respective Jetty documentation section [here](http://www.eclipse.org/jetty/documentation/9.2.10.v20150310/quickstart-running-jetty.html#creating-jetty-base).
+
+First, in order to enable basic authentication, the following configuration needs to be added to the `etc/jetty.xml` file, right before the closing `</Configure>` tag at the end of the file:
+
+```
+<Call name="addBean">
+	<Arg>
+		<New class="org.eclipse.jetty.security.HashLoginService">
+			<Set name="name">ODF Realm</Set>
+			<Set name="config"><SystemProperty name="jetty.home" default="."/>/etc/realm.properties</Set>
+		</New>
+	</Arg>
+</Call>
+```
+
+Second, an `etc/realm.properties` file needs to be added that contains the credentials of the ODF users in the following [format](http://www.eclipse.org/jetty/documentation/9.2.10.v20150310/configuring-security-authentication.html#security-realms):
+
+```
+<username>: <password>[,<rolename> ...]
+```
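+
+For example, an entry for the default ODF user mentioned in the [First steps](first-steps.html) section could look as follows (the role name `user` is an assumption; use whatever your security constraints require):
+
+```
+odf: admin4odf,user
+```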
+
+Then, you will have to create and initialize a new directory where you deploy your web apps and
+copy the ODF war there. These commands should do the trick:
+
+	mkdir myjettybase
+	cd myjettybase
+	java -jar $JETTY_HOME/start.jar --add-to-startd=https,ssl,deploy
+	cp $ODFDIR/odf-web-0.1.0-SNAPSHOT.war webapps
+	java -Dodf.zookeeper.connect=zkserver.example.org:2181 -jar $JETTY_HOME/start.jar
+
+The first java command initializes the Jetty base directory by creating a directory ``start.d`` which
+contains some config files (e.g. http.ini contains the port the server runs on) and the
+empty ``webapps`` directory.
+The copy command copies the ODF war file to the webapps folder.
+The last command starts Jetty (on default port 8443). You can stop it by hitting Ctrl-C.
+
+You should see a message like this one indicating that the app was found and started.
+
+	2016-02-26 08:28:24.033:INFO:oejsh.ContextHandler:Scanner-0: Started o.e.j.w.WebAppContext@-545d793e{/odf-web-0.1.0-SNAPSHOT,file:/C:/temp/jetty-0.0.0.0-8443-odf-web-0.1.0-SNAPSHOT.war-_odf-web-0.1.0-SNAPSHOT-any-8485458047819836926.dir/webapp/,AVAILABLE}{myjettybase\webapps\odf-web-0.1.0-SNAPSHOT.war}
+
+Point your browser to [https://localhost:8443/odf-web-0.1.0-SNAPSHOT](https://localhost:8443/odf-web-0.1.0-SNAPSHOT) to see the ODF console.
+
+
+
+##### Websphere Liberty Profile
+
+Stay tuned
diff --git a/odf/odf-doc/src/site/markdown/odf-metadata-api.md b/odf/odf-doc/src/site/markdown/odf-metadata-api.md
new file mode 100755
index 0000000..d9914cf
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/odf-metadata-api.md
@@ -0,0 +1,67 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# ODF Metadata API
+
+ODF provides a very simple API for searching, retrieving, and (to a limited extent) for creating
+new metadata objects.
+This API abstracts away specifics of the underlying metadata store.
+See the REST resource `metadata`, e.g., [here](https://sdp1.rtp.raleigh.ibm.com:58081/odf-web-0.1.0-SNAPSHOT/swagger/#/metadata) or look at the Java interface `org.apache.atlas.odf.core.metadata.MetadataStore`.
+
+In this API we distinguish between `MetaDataObject`s and `MetadataObjectReferences`.
+Where the former represent an object as such, the latter is just a reference to an object.
+You may think of the `MetaDataObjectReference` as a generalized XMeta RID.
+
+Simply put, metadata objects are represented as JSON where object attributes
+are represented as JSON attributes with the same name.
+Simple types map to JSON simple types. References
+to another object are represented as JSON objects of type `MetadataObjectReference` that
+have three attributes:
+1. `id`: the object ID
+2. `repositoryId`: the ID of the repository where the object resides
+3. `url` (optional): A URL pointing to the object. For Atlas, this is a link to the object in the
+Atlas dashboard.
+
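+For example, a single reference could look like this (the values, including the URL format, are purely illustrative):
+
+	{
+	   "id": "1234-abcd",
+	   "repositoryId": "atlas:repos1",
+	   "url": "https://localhost:21443/#!/detailPage/1234-abcd"
+	}
+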
+The API is read-only; the only objects that can be created are annotations (see section [Data model and extensibility](data-model.html)).
+
+Here is an example: suppose there is a table object which has a name and a list of columns. The JSON of this table would look something like this:
+
+	{
+	   "name": "CUSTOMERS",
+	   "columns": [
+	                 {
+	                    "id": "1234-abcd",
+	                    "repositoryId": "atlas:repos1"
+	                 },
+	                 {
+	                    "id": "5678-efgh",
+	                    "repositoryId": "atlas:repos1"
+	                 }
+	              ],
+	   "reference": {
+	                  "id": "9abc-ijkl",
+	                  "repositoryId": "atlas:repos1"
+	                },
+	   "javaClass": "org.apache.atlas.odf.core.metadata.models.Table"
+	}
+
+The `reference` value represents the reference to the object itself, whereas
+`javaClass` denotes the type of object (a table in this case).
+The `name` attribute contains the table name, while the `columns` value is a list
+of references to two column objects. These references can be retrieved separately
+to look at the details.
diff --git a/odf/odf-doc/src/site/markdown/operations.md b/odf/odf-doc/src/site/markdown/operations.md
new file mode 100755
index 0000000..d5190c2
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/operations.md
@@ -0,0 +1,19 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Operations
diff --git a/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md b/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md
new file mode 100755
index 0000000..2a778c2
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/spark-discovery-service-tutorial.md
@@ -0,0 +1,210 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Tutorial: Creating Spark discovery services
+
+This tutorial shows how to turn an existing [Apache Spark][1] application into an ODF discovery service of type *Spark*. The tutorial is based on the Spark *summary statistics* example application provided with ODF in project `odf-spark-example-application`. It uses the `describe()` method of the Spark [DataSet][2] class to calculate basic summary statistics on a Spark data frame.
+
+## Introduction
+
+ODF supports Spark applications implemented in Java or Scala. In order to be used as ODF discovery services, a Spark application must implement one of the following two interfaces:
+
+* **DataFrame** - intended for Spark applications that process relational tables by using Spark data frames internally.
+* **Generic** - intended for applications that need the full flexibility of ODF.
+
+Both interfaces require a specific method (or multiple methods) to be implemented by the Spark application; ODF calls this method to run the discovery service. The method takes the current Spark context and the data set to be processed as input parameters and returns the annotations to be created. The two interface types are described in detail in separate sections below.
+
+Spark discovery services must be packaged into a single application jar file that contains all required dependencies. Spark libraries, drivers for data access, and the required ODF jar files are implicitly provided by ODF and do not need to be packaged into the application jar file. The jar file may be renamed to *zip* by replacing its extension (not by zipping the jar file) in order to avoid possible security issues when making the file available through tools like [box](https://box.com).
+
+### Configure an ODF Spark cluster
+
+ODF supports access to a local Spark cluster which can be configured in the `sparkConfig` section of the ODF settings using the ODF REST API or the ODF web application. The parameter `clusterMasterUrl` must point to the master URL of your Spark cluster, e.g. `spark://dyn-9-152-202-64.boeblingen.de.ibm.com:7077`. An optional set of [Spark configuration options](http://spark.apache.org/docs/latest/configuration.html) can be set in the `configs` parameter by providing appropriate name value pairs. The ODF test environment comes with a ready-to-use local Spark cluster running on your local system. It can be monitored at the URL `http://localhost:8080/`.
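+
+A `sparkConfig` section could look like this (a sketch; the values are illustrative, and `spark.executor.memory` is just one example of a Spark configuration option):
+
+	"sparkConfig": {
+		"clusterMasterUrl": "spark://localhost:7077",
+		"configs": {
+			"spark.executor.memory": "1g"
+		}
+	}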
+
+### Registering a Spark service
+
+A Spark discovery service can be registered using the *Services* tab of the admin Web application or the `/services` endpoint of the [ODF REST API](../swagger/ext-services.html). The following parameters need to be specified to register a service; you may use the following example values to register your own instance of the *summary statistics* discovery service:
+
+* Name of the discovery service: `Spark Summary Statistics`
+* Description: `Calculates summary statistics for a given table or data file.`
+* Unique service ID: `spark-summary-statistics`
+* URL of application jar file (may be renamed to zip): `file:///tmp/odf-spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar` (Update link to point to correct location of the file)
+* Name of entry point to be called: `org.apache.atlas.odf.core.spark.SummaryStatistics`
+* Service interface type: `DataFrame`
+
+For trying out the *generic* interface, entry point `org.apache.atlas.odf.spark.SparkDiscoveryServiceExample` and service interface type `Generic` may be specified.   
+
+### Testing the Spark service
+
+In order to test the Spark service, you can use the *DataSets* tab of the ODF admin Web application. Click on *START ANALYSIS* next to a relational data set (data file or relational table), then select the newly registered Spark discovery service and click *SUBMIT*. You can browse the resulting annotations by searching for the name of the annotation type in the Atlas metadata repository. The example service creates two types of annotations, *SummaryStatisticsAnnotation* and *SparkTableAnnotation*. *SummaryStatisticsAnnotation* annotates data set columns with the five attributes `count`, `mean`, `stddev`, `min`, and `max` that represent basic statistics of the data set. *SparkTableAnnotation* annotates the data set with a single attribute `count` that represents the number of columns of the data set.
+
+### Developing Spark discovery services
+
+When developing a new discovery service, you may use project `odf-spark-example-application` as a template. Rather than testing your service interactively using the ODF admin web application, it is recommended to create a new test case in class `SparkDiscoveryServiceTest` of project `odf-core`. Two methods need to be added: one for describing the service, the other for running the actual test.
+
+The method that describes the service basically contains the same parameters that need to be specified when adding a service through the admin webapp. The jar file must be a URL that may point to a local file:
+
+	public static DiscoveryServiceRegistrationInfo getSparkSummaryStatisticsService() {
+		DiscoveryServiceRegistrationInfo regInfo = new DiscoveryServiceRegistrationInfo();
+		regInfo.setId("spark-summary-statistics-example-service");
+		regInfo.setName("Spark summary statistics service");
+		regInfo.setDescription("Example discovery service calling summary statistics Spark application");
+		regInfo.setIconUrl("spark.png");
+		regInfo.setLink("http://www.spark.apache.org");
+		regInfo.setParallelismCount(2);
+		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
+		endpoint.setJar("file:/tmp/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
+		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
+		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
+		regInfo.setEndpoint(endpoint);
+		return regInfo;
+	}
+
+The method that runs the actual test retrieves the service description from the above method and specifies what type of data set should be used for testing (data file vs. relational table) as well as what types of annotations are created by the discovery service. The test automatically applies the required configurations, runs the service, and checks whether new annotations of the respective types have been created. In order to speed up processing, the existing test can be temporarily commented out.
+
+	@Test
+	public void testLocalSparkClusterWithLocalDataFile() throws Exception{
+		runSparkServiceTest(
+			getLocalSparkConfig(),
+			DATASET_TYPE.DataFile,
+			getSparkSummaryStatisticsService(),
+			new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" }
+		);
+	}
+
+For compiling the test case, the `odf-core` project needs to be built:
+
+	cd ~/git/shared-discovery-platform/odf-core
+	mvn clean install -DskipTests
+
+The test is started implicitly when building the `odf-spark` project.
+
+	cd ~/git/shared-discovery-platform/odf-spark
+	mvn clean install
+
+If something goes wrong, debugging information will be printed to stdout during the test. For speeding up the build and test process, option `-Duse.running.atlas` may be added to the two `mvn` commands. This way, a running Atlas instance will be used instead of starting a new instance every time.
+
+### Troubleshooting
+
+Before registering a Spark application in ODF as a new discovery service, it is highly recommended to test the application interactively using the `spark-submit` tool and to check whether the application implements the requested interfaces and produces the expected output format. If the execution of a Spark discovery service fails, you can browse the ODF log for additional information.
+
+## DataFrame interface
+
+The ODF *DataFrame* interface for Spark discovery services has a number of advantages that make it easy to turn an existing Spark application into an ODF discovery service:
+
+* No dependencies on the ODF code, except that a specific method needs to be implemented.
+* No need to care about data access, because the data set to be analyzed is provided as a Spark data frame.
+* Easy creation of annotations by returning "annotation data frames".
+
+The simplicity of the DataFrame interface leads to a number of restrictions:
+
+* Only relational data sets can be processed, i.e. data files (OMDataFile) and relational tables (OMTable).
+* Annotations may only consist of a flat list of attributes that represent simple data types, i.e. data structures and references to other data sets are not supported.  
+* Annotations may only be attached to the analyzed relational data set and to its columns.
+
+### Method to be implemented
+
+In order to implement the DataFrame interface, the Spark application must implement the following method:
+
+	public static Map<String,Dataset<Row>> processDataFrame(JavaSparkContext sc, DataFrame df, String[] args)
+
+The parameters to be provided to the Spark application are:
+
+* **sc**: The Spark context to be used by the Spark application for performing all Spark operations.
+* **df**: The data set to be analyzed represented by a Spark data frame.
+* **args**: Optional arguments for future use.
+
+### Expected output
+
+The result to be provided by the Spark application must be of type `Map<String,Dataset<Row>>` where `String` represents the type of the annotation to be created and `Dataset<Row>` represents the *annotation data frame* that defines the annotations to be created. If the annotation type does not yet exist, a new annotation type will be dynamically created based on the attributes of the annotation data frame.
+
+The following example describes the format of the annotation data frame. The example uses the BankClientsShort data file provided with ODF. It contains 16 columns with numeric values that represent characteristics of bank clients:
+
+CUST_ID | ACQUIRED | FIRST_PURCHASE_VALUE | CUST_VALUE_SCORE | DURATION_OF_ACQUIRED | CENSOR | ACQ_EXPENSE | ACQ_EXPENSE_SQ | IN_B2B_INDUSTRY | ANNUAL_REVENUE_MIL | TOTAL_EMPLOYEES | RETAIN_EXPENSE | RETAIN_EXPENSE_SQ | CROSSBUY | PURCHASE_FREQ | PURCHASE_FREQ_SQ
+---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
+481 | 0 | 0.0 | 0.0000 | 0 | 0 | 382.32 | 146168.58 | 0 | 56.51 | 264 | 0.00 | 0.0 | 0 | 0 | 0
+482 | 1 | 249.51 | 59.248 | 730 | 1 | 586.61 | 344111.29 | 1 | 35.66 | 355 | 1508.16 | 2274546.59 | 2 | 3 | 9
+483 | 0 | 0.0 | 0.0000 | 0 | 0 | 444.61 | 197678.05 | 1 | 40.42 | 452 | 0.00 | 0.0 | 0 | 0 | 0
+484 | 1 | 351.41 | 77.629 | 730 | 1 | 523.10 | 273633.61 | 1 | 56.36 | 320 | 2526.72 | 6384313.96 | 3 | 12 | 144
+485 | 1 | 460.04 | 76.718 | 730 | 1 | 357.78 | 128006.53 | 1 | 23.53 | 1027 | 2712.48 | 7357547.75 | 2 | 13 | 169
+486 | 1 | 648.6 | 0.0000 | 701 | 0 | 719.61 | 517838.55 | 0 | 59.97 | 1731 | 1460.64 | 2133469.21 | 5 | 11 | 121
+487 | 1 | 352.84 | 63.370 | 730 | 1 | 593.44 | 352171.03 | 1 | 45.08 | 379 | 1324.62 | 1754618.14 | 4 | 8 | 64
+488 | 1 | 193.18 | 0.0000 | 289 | 0 | 840.30 | 706104.09 | 0 | 35.95 | 337 | 1683.83 | 2835283.47 | 6 | 12 | 144
+489 | 1 | 385.14 | 0.0000 | 315 | 0 | 753.13 | 567204.80 | 0 | 58.85 | 745 | 1214.99 | 1476200.7 | 1 | 12 | 144
+
+When applying the *Spark Summary Statistics* service to the table, two annotation data frames are returned by the service, one for the *SparkSummaryStatistics* and one for the *SparkTableAnnotation* annotation type. The data frame returned for the *SparkSummaryStatistics* annotation type consists of one column for each attribute of the annotation. In the example, the attributes are `count`, `mean`, `stddev`, `min`, and `max`, standing for the value count, the mean value, the standard deviation, and the minimum and maximum value of each column. Each row represents one annotation to be created. The first column, `ODF_ANNOTATED_COLUMN`, identifies the column of the input data frame to which the annotation should be assigned.
+
+ODF_ANNOTATED_COLUMN    |count   |                mean |              stddev |       min |       max
+------------------------|--------|---------------------|---------------------|-----------|----------
+              CLIENT_ID |  499.0 |   1764.374749498998 |  108.14436025195488 |    1578.0 |    1951.0
+                    AGE |  499.0 |   54.65130260521042 |  19.924220223453258 |      17.0 |      91.0
+          NBR_YEARS_CLI |  499.0 |  16.847695390781563 |  10.279080097460023 |       0.0 |      48.0
+        AVERAGE_BALANCE |  499.0 |   17267.25809619238 |   30099.68272689043 |  -77716.0 |  294296.0
+             ACCOUNT_ID |  499.0 |   126814.4749498998 |  43373.557241804665 |  101578.0 |  201950.0
+
+If there is no (first) column named `ODF_ANNOTATED_COLUMN`, the annotations will be assigned to the data set rather than to its columns. The following example annotation data frame of type *SparkTableAnnotation* assigns a single attribute `count` to the data set:
+
+| count |
+|-------|
+| 499   |
+
+### Example implementation
+
+The *summary statistics* discovery service may be used as a reference implementation of the DataFrame interface. It is available in class `SummaryStatistics` of project `odf-spark-example-application`.
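+
+For illustration, the following is a minimal, hypothetical sketch of the overall shape of such a service. The class and annotation names are made up, and the statistics logic is reduced to a plain row count; in recent Spark versions, a Java `DataFrame` corresponds to `Dataset<Row>`:
+
+	import java.util.HashMap;
+	import java.util.Map;
+
+	import org.apache.spark.api.java.JavaSparkContext;
+	import org.apache.spark.sql.Dataset;
+	import org.apache.spark.sql.Row;
+
+	// Hypothetical example class, not part of ODF.
+	public class MyStatisticsService {
+
+		public static Map<String, Dataset<Row>> processDataFrame(JavaSparkContext sc, Dataset<Row> df, String[] args) {
+			Map<String, Dataset<Row>> result = new HashMap<>();
+			// Single-row data frame with one column named "count": since there is
+			// no ODF_ANNOTATED_COLUMN column, ODF attaches the resulting
+			// "MyTableAnnotation" to the data set itself.
+			result.put("MyTableAnnotation", df.groupBy().count());
+			// A column-level annotation type would additionally require a data
+			// frame whose first column is named ODF_ANNOTATED_COLUMN, with one
+			// row per annotated input column.
+			return result;
+		}
+	}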
+
+## Generic interface
+
+The *generic* interface provides the full flexibility of ODF discovery services implemented in Java (or Scala):
+
+* No restrictions regarding the types of data sets to be analyzed.
+* Arbitrary objects may be annotated, because references to arbitrary objects may be retrieved from the metadata catalog.
+* Annotations may contain nested structures of data types and references to arbitrary objects.
+
+On the downside, the generic interface may be slightly more difficult to use than the DataFrame interface:
+
+* The discovery service must implement a specific ODF interface.
+* Spark RDDs, data frames, etc. must be constructed explicitly (helper methods are available in class `SparkUtils`).
+* Resulting annotations must be constructed explicitly and linked to the annotated objects.
+
+### Methods to be implemented
+
+The Spark application must implement the `SparkDiscoveryService` interface available in ODF project `odf-api`:
+
+	public class SparkDiscoveryServiceExample extends SparkDiscoveryServiceBase implements SparkDiscoveryService
+
+The interface consists of the following two methods, which are described in detail in the [Java Docs for ODF services](./apidocs/index.html). For convenience, the `SparkDiscoveryServiceBase` class can be extended, because the `SparkDiscoveryService` interface defines many more methods.
+
+#### Actual discovery service logic
+
+This method is called to run the actual discovery service.
+
+	DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request)
+
+#### Validation whether data set can be accessed
+
+This method is called internally before running the actual discovery service.
+
+	DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer)
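+
+A minimal sketch of such a service is shown below. The class name and method bodies are illustrative only; `createSyncResponse` is assumed to be available from the base class, analogous to `SyncDiscoveryServiceBase`, and the exact signatures are documented in the Java Docs:
+
+	import java.util.Collections;
+
+	import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+	import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+	import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+	import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+	import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+	// plus imports for SparkDiscoveryService and SparkDiscoveryServiceBase, see the Java Docs
+
+	public class MySparkDiscoveryService extends SparkDiscoveryServiceBase implements SparkDiscoveryService {
+
+		@Override
+		public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
+			// Called internally before runAnalysis(): check whether the data
+			// set can be accessed, e.g. by opening a connection to it.
+			return new DataSetCheckResult();
+		}
+
+		@Override
+		public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+			// Construct RDDs or data frames for the requested data set (helper
+			// methods are available in class SparkUtils), run the analysis, and
+			// create annotations linked to the analyzed objects. This sketch
+			// returns an empty annotation list.
+			return createSyncResponse(ResponseCode.OK, "Analysis finished", Collections.emptyList());
+		}
+	}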
+
+### Example implementation
+
+Class `SparkDiscoveryServiceExample` in project `odf-spark-example-application` provides an example implementation of a *generic* discovery service. It is an alternative implementation of the *summary statistics* discovery service.
+
+  [1]: http://spark.apache.org/
+  [2]: http://spark.apache.org/docs/latest/api/java/index.html
diff --git a/odf/odf-doc/src/site/markdown/test-env.md b/odf/odf-doc/src/site/markdown/test-env.md
new file mode 100755
index 0000000..a6160c8
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/test-env.md
@@ -0,0 +1,88 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Test environment
+
+The odf-test-env archive contains a simple test environment for ODF.
+It contains all components required to run a basic ODF installation, namely:
+
+- Apache Kafka
+- Apache Atlas
+- Jetty (to host the ODF web app)
+- Apache Spark
+
+The test environment is available on Linux and Windows.
+
+## Before you start
+
+Make sure that
+
+- The Python 2.7 executable is in your path
+- The environment variable JAVA_HOME is set and points to a proper JDK (not just a JRE!)
+
+
+## *Fast path*: Download and install ODF test environment
+
+If you are running on Linux, you can download and install the latest ODF test environment with the script `download-install-odf-testenv.sh`, available
+<a href="https://shared-discovery-platform-jenkins.swg-devops.com:8443/view/1-ODF/job/Open-Discovery-Framework/lastSuccessfulBuild/artifact/odf-test-env/src/main/scripts/download-install-odf-testenv.sh">
+here</a>.
+
+If you call the script with no parameters, it will download, install, and start the latest version of the test environment.
+The default unpack directory is `~/odf-test-env`.
+
+## Download the test environment manually
+
+You can get the latest version of the test environment from Jenkins
+<a href="https://shared-discovery-platform-jenkins.swg-devops.com:8443/view/1-ODF/job/Open-Discovery-Framework/lastSuccessfulBuild/artifact/odf-test-env/target/odf-test-env-0.1.0-SNAPSHOT-bin.zip">
+here</a>.
+
+## Running the test environment
+
+To start the test environment on Linux, run the script ``odftestenv.sh start``. The script starts four background processes (Zookeeper, Kafka, Atlas, Jetty). To stop the test environment, use ``odftestenv.sh stop``.
+
+To start the test environment on Windows, run the script ``start-odf-testenv.bat``.
+This opens four command windows (Zookeeper, Kafka, Atlas, Jetty) with respective window titles. To stop the test environment, close all of these windows. Note that the `HADOOP_HOME` environment variable needs to be set on Windows as described in the [build documentation](build.md).
+
+
+Once the servers are up and running, you can reach the ODF console at
+[https://localhost:58081/odf-web-0.1.0-SNAPSHOT](https://localhost:58081/odf-web-0.1.0-SNAPSHOT).
+
+*Note*: The test environment scripts clean the Zookeeper and Kafka data before starting.
+This means in particular that the configuration is reset every time you restart!
+
+Have fun!
+
+## Restart / cleanup
+
+On Linux, the `odftestenv.sh` script has these additional options:
+
+- `cleanconfig`: Restart the test environment with a clean configuration and clean Kafka topics
+- `cleanmetadata`: Restart with empty metadata
+- `cleanall`: Both `cleanconfig` and `cleanmetadata`
+
+
+## Additional Information
+
+### Deploying a new version of the ODF war
+Once started, you can hot-deploy a new version of the ODF war file simply by copying it
+to the ``odfjettybase/webapps`` folder, even while the test environment's Jetty instance is running.
+Note that it may take a couple of seconds before the new app is available.
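+
+For example (paths are illustrative and depend on your local setup):
+
+	cp ~/git/shared-discovery-platform/odf-web/target/odf-web-0.1.0-SNAPSHOT.war ~/odf-test-env/odfjettybase/webapps/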
+
+If you have the ODF build set up, you may want to use the ``deploy-odf-war.bat/.sh`` script for this.
+You must first edit the environment variable ``ODF_GIT_DIR`` in this script to point to your local build directory.
diff --git a/odf/odf-doc/src/site/markdown/troubleshooting.md b/odf/odf-doc/src/site/markdown/troubleshooting.md
new file mode 100755
index 0000000..1c8d95e
--- /dev/null
+++ b/odf/odf-doc/src/site/markdown/troubleshooting.md
@@ -0,0 +1,127 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+# Troubleshooting
+
+## ODF
+
+### Debugging using Eclipse
+
+You can run Jetty inside Eclipse using the “Eclipse Jetty Feature” (Eclipse -> Help -> Install New Software…).
+Then, create a new debug configuration (Run -> Debug Configurations…) and specify:
+
+*WebApp* tab:
+
+- Project: `odf-web`
+- WebApp Folder: `../../../../../odf-web/src/main/webapp`
+- Context Path: `/odf-web-0.1.0-SNAPSHOT`
+- HTTP / HTTPs Port: `58081`
+
+*Arguments* tab:
+
+- VM Arguments: `-Dodf.zookeeper.connect=localhost:52181`
+
+As the Eclipse Jetty plugin supports neither secure connections nor basic authentication, remove the
+`<security-constraint>` and `<login-config>` sections from the web.xml.
+The URL of the ODF webapp then needs to be prefixed with http:// rather than https://.
+
+Then start Atlas and Kafka via the test environment (just comment out the line that starts Jetty, or stop it after it has started).
+Now you can use the debug configuration in Eclipse to start ODF.
+
+See also https://ibm-analytics.slack.com/archives/shared-discovery-pltf/p1467365155000009
+
+
+### Logs and trace
+ODF uses the ``java.util.logging`` APIs, so if your runtime environment supports configuring these
+directly, use the respective mechanism.
+
+For runtimes that don't support this out of the box (like Jetty), you can set the JVM system property
+``odf.logspec`` to a value of the form ``<Level>,<Path>``, which advises ODF to
+write the log with logging level ``<Level>`` to the file under ``<Path>``.
+
+Example:
+
+	-Dodf.logspec=ALL,/tmp/myodflogfile.log
+
+Available log levels are the ones defined by ``java.util.logging``, namely SEVERE, WARNING, INFO, FINE, FINER, FINEST,
+and ALL.
+
+
+## Atlas
+
+### Logs
+
+The logs directory contains a number of logfiles, together with a file called ``atlas.pid`` which
+contains the process ID of the currently running Atlas server.
+In case of issues, the file ``logs/application.log`` should be checked first.
+
+### Restarting Atlas
+
+Run these commands (from the Atlas installation directory) to restart Atlas:
+
+	bin/atlas_stop.py
+	bin/atlas_start.py
+
+### Clean all data
+
+To clean the Atlas repository, simply remove the directories ``data`` and ``logs`` before starting.
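+
+For example, from the Atlas installation directory:
+
+	rm -rf data logs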
+
+
+### Issues
+
+#### Service unavailable (Error 503)
+
+Sometimes calling any Atlas REST API (and the UI) doesn't work, and an HTTP error 503 is returned.
+We see this error occasionally and don't know any way to fix it except cleaning all data and restarting Atlas.
+
+
+#### Creating Atlas objects takes a long time
+
+It takes a long time to create an Atlas object, and after about a minute you see a message like this in the log:
+
+	Unable to update metadata after 60000ms
+
+This is the result of the Kafka queues (which are used for notifications) being in an error state.
+To fix this, restart Atlas (no data cleaning required).
+
+## Kafka / Zookeeper
+
+If there is a problem starting Kafka / Zookeeper, check whether there might be a port conflict caused by other instances of Kafka / Zookeeper using the default port.
+This might be the case if a more recent version of the IS suite is installed on the system on which you want to run ODF.
+
+Example: If another instance of Zookeeper uses the default port 52181, you need to switch the Zookeeper port by replacing 52181 with a free port number in the following files (see the sketch after this list):
+- start-odf-testenv.bat
+- kafka_2.10-0.8.2.1\config\zookeeper.properties
+- kafka_2.10-0.8.2.1\config\server.properties
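+
+For example, after switching to a freely chosen port 52182 (illustrative value), the relevant entries in the two properties files would look like this:
+
+	# zookeeper.properties
+	clientPort=52182
+
+	# server.properties
+	zookeeper.connect=localhost:52182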
+
+### Reset
+
+To reset your Zookeeper / Kafka installation, you will first have to stop the servers:
+
+	bin/kafka-server-stop
+	bin/zookeeper-server-stop
+
+Next, remove the Zookeeper data directory and the Kafka logs directory. Note that "logs"
+in Kafka means the actual data in the topics, not the logfiles.
+You can find the directories to clean in the property ``dataDir`` in the ``zookeeper.properties``
+file and in ``log.dirs`` in ``server.properties``, respectively.
+The defaults are ``/tmp/zookeeper`` and ``/tmp/kafka-logs``.
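+
+With the default settings, the cleanup thus boils down to:
+
+	rm -rf /tmp/zookeeper
+	rm -rf /tmp/kafka-logs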
+
+Restart the servers with:
+
+	bin/zookeeper-server-start config/zookeeper.properties
+	bin/kafka-server-start config/server.properties
diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml
new file mode 100755
index 0000000..e6ffb46
--- /dev/null
+++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/pom.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+
+	<groupId>odf.tutorials</groupId>
+	<artifactId>odf-tutorial-discoveryservice</artifactId>
+	<version>1.2.0-SNAPSHOT</version>
+	<packaging>jar</packaging>
+
+	<name>odf-tutorial-discoveryservice</name>
+
+	<properties>
+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+</project>
diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java
new file mode 100755
index 0000000..2899a53
--- /dev/null
+++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialAnnotation.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package odftutorial.discoveryservicetutorial;
+
+import org.apache.atlas.odf.core.metadata.Annotation;
+
+/*
+ * An example annotation that adds one property to the default annotation class
+ */
+public class ODFTutorialAnnotation extends Annotation {
+
+	private String tutorialProperty;
+
+	public String getTutorialProperty() {
+		return tutorialProperty;
+	}
+
+	public void setTutorialProperty(String tutorialProperty) {
+		this.tutorialProperty = tutorialProperty;
+	}
+
+}
diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java
new file mode 100755
index 0000000..16848ec
--- /dev/null
+++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryService.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package odftutorial.discoveryservicetutorial;
+
+import java.util.Collections;
+import java.util.Date;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+import org.apache.atlas.odf.api.discoveryservice.SyncDiscoveryServiceBase;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
+/**
+ * A simple synchronous discovery service that creates one annotation for the data set it analyzes.
+ *
+ */
+public class ODFTutorialDiscoveryService extends SyncDiscoveryServiceBase {
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		// 1. create an annotation that annotates the data set object passed in the request
+		ODFTutorialAnnotation annotation = new ODFTutorialAnnotation();
+		annotation.setAnnotatedObject(request.getDataSetContainer().getDataSet().getReference());
+		// set a new property called "tutorialProperty" to some string
+		annotation.setTutorialProperty("Tutorial annotation was created on " + new Date());
+
+		// 2. create a response with our annotation created above
+		return createSyncResponse( //
+				ResponseCode.OK, // Everything works OK 
+				"Everything worked", // human-readable message
+				Collections.singletonList(annotation) // new annotations
+		);
+	}
+
+}
diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json
new file mode 100755
index 0000000..2709548
--- /dev/null
+++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/main/resources/META-INF/odf/odf-services.json
@@ -0,0 +1,13 @@
+[
+  {
+	"id": "odftutorial.discoveryservicetutorial.ODFTutorialDiscoveryService",
+	"name": "First tutorial service",
+	"description": "The first tutorial service that is synchronous and creates just a single annotation for a data set.",
+	"deletable": true,
+	"endpoint": {
+	  "runtimeName": "Java",
+	  "className": "odftutorial.discoveryservicetutorial.ODFTutorialDiscoveryService"
+	},
+	"iconUrl": "https://www-03.ibm.com/ibm/history/exhibits/logo/images/920911.jpg"
+  }
+]
diff --git a/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java
new file mode 100755
index 0000000..1eab53f
--- /dev/null
+++ b/odf/odf-doc/src/site/resources/tutorial-projects/odf-tutorial-discoveryservice/src/test/java/odftutorial/discoveryservicetutorial/ODFTutorialDiscoveryServiceTest.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package odftutorial.discoveryservicetutorial;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit test for discovery service
+ */
+public class ODFTutorialDiscoveryServiceTest {
+	
+	@Test
+	public void test() throws Exception {
+		Assert.assertTrue(true);
+	}
+}
+
diff --git a/odf/odf-doc/src/site/site.xml b/odf/odf-doc/src/site/site.xml
new file mode 100755
index 0000000..0a23228
--- /dev/null
+++ b/odf/odf-doc/src/site/site.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="Open Discovery Framework">
+	<skin>
+		<groupId>org.apache.maven.skins</groupId>
+		<artifactId>maven-fluido-skin</artifactId>
+		<version>1.4</version>
+	</skin>
+	<bannerLeft>
+		<name>Open Discovery Framework</name>
+	</bannerLeft>
+	<custom>
+		<fluidoSkin>
+			<topBarEnabled>false</topBarEnabled>
+			<sideBarEnabled>true</sideBarEnabled>
+		</fluidoSkin>
+	</custom>
+	<body>
+		<links>
+			<item name="Apache Atlas" href="http://atlas.incubator.apache.org" />
+			<item name="Apache Kafka" href="http://kafka.apache.org" />
+		</links>
+		<menu name="Getting Started">
+			<item name="Overview" href="index.html" />
+			<item name="First Steps" href="first-steps.html" />
+			<item name="Build" href="build.html" />
+			<item name="Test Environment" href="test-env.html" />
+		</menu>
+		<menu name="Tutorials">
+			<item name="Install ODF and its prerequisites manually" href="install.html"/>
+			<item name="Run your first ODF analysis" href="first-analysis-tutorial.html"/>
+			<item name="Build and run your first Discovery Service" href="discovery-service-tutorial.html"/>
+			<item name="Creating Spark discovery services" href="spark-discovery-service-tutorial.html"/>
+		</menu>
+		<menu name="Reference">
+			<item name="ODF Metadata API" href="odf-metadata-api.html" />
+			<item name="API reference" href="api-reference.html" />
+			<item name="Troubleshooting" href="troubleshooting.html" />
+		</menu>
+		<menu name="Customization">
+			<item name="Discovery Services" href="discovery-services.html" />
+			<item name="Data Model" href="data-model.html" />
+		</menu>
+		<footer>All rights reserved.</footer>
+	</body>
+</project>
diff --git a/odf/odf-messaging/.gitignore b/odf/odf-messaging/.gitignore
new file mode 100755
index 0000000..94858e5
--- /dev/null
+++ b/odf/odf-messaging/.gitignore
@@ -0,0 +1,19 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+derby.log
diff --git a/odf/odf-messaging/pom.xml b/odf/odf-messaging/pom.xml
new file mode 100755
index 0000000..95f9d44
--- /dev/null
+++ b/odf/odf-messaging/pom.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<artifactId>odf-messaging</artifactId>
+	<name>odf-messaging</name>
+
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka-clients</artifactId>
+			<version>0.10.0.0</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka_2.11</artifactId>
+			<version>0.10.0.0</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.derby</groupId>
+			<artifactId>derby</artifactId>
+			<version>10.12.1.1</version>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-jar-plugin</artifactId>
+				<version>2.6</version>
+				<executions>
+					<execution>
+						<goals>
+							<goal>test-jar</goal>
+						</goals>
+						<configuration>
+							<!-- remove the implementation properties file from the test jar -->
+							<excludes>
+								<exclude>org/apache/atlas/odf/odf-implementation.properties</exclude>
+							</excludes>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
+	<profiles>
+		<profile>
+			<id>all-unit-tests</id>
+			<activation>
+				<activeByDefault>true</activeByDefault>
+			</activation>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-surefire-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.build.project.name>${project.name}</odf.build.project.name>
+
+								<!-- additional properties for the test services -->
+								<asynctestservice.testparam>sometestvalueforasync</asynctestservice.testparam>
+								<synctestservice.testparam>sometestvalueforsync</synctestservice.testparam>
+							</systemPropertyVariables>
+							<dependenciesToScan>
+								<dependency>org.apache.atlas.odf:odf-core</dependency>
+							</dependenciesToScan>
+							<!--
+							<includes><include>**ShutdownTest**</include></includes>
+							-->
+							<excludes>
+								<exclude>**/integrationtest/**</exclude>
+								<exclude>**/configuration/**</exclude>
+							</excludes>
+						</configuration>
+					</plugin>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-failsafe-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
+							</systemPropertyVariables>
+							<dependenciesToScan>
+								<dependency>org.apache.atlas.odf:odf-core</dependency>
+							</dependenciesToScan>
+							<includes>
+								<include>**/integrationtest/**/**.java</include>
+							</includes>
+							<excludes>
+								<exclude>**/integrationtest/**/SparkDiscoveryService*</exclude>
+								<exclude>**/integrationtest/**/AnalysisManagerTest.java</exclude>
+							</excludes>
+						</configuration>
+						<executions>
+							<execution>
+								<id>integration-test</id>
+								<goals>
+									<goal>integration-test</goal>
+								</goals>
+							</execution>
+							<execution>
+								<id>verify</id>
+								<goals>
+									<goal>verify</goal>
+								</goals>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+		<profile>
+			<id>reduced-tests</id>
+			<activation>
+				<property>
+					<name>reduced-tests</name>
+					<value>true</value>
+				</property>
+			</activation>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-surefire-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.build.project.name>${project.name}</odf.build.project.name>
+							</systemPropertyVariables>
+							<dependenciesToScan>
+								<dependency>org.apache.atlas.odf:odf-core</dependency>
+							</dependenciesToScan>
+							<excludes>
+								<exclude>**/KafkaQueueManagerTest.java</exclude>
+								<exclude>**/ShutdownTest.java</exclude>
+								<exclude>**/MultiPartitionConsumerTest.java</exclude>
+								<exclude>**/integrationtest/**/SparkDiscoveryService*</exclude>
+								<exclude>**/integrationtest/**/AnalysisManagerTest.java</exclude>
+								<exclude>**/configuration/**</exclude>
+							</excludes>
+						</configuration>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
+</project>
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java
new file mode 100755
index 0000000..c9c95cc
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaMonitor.java
@@ -0,0 +1,545 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.apache.atlas.odf.api.engine.BrokerNode;
+import org.apache.atlas.odf.api.engine.KafkaBrokerPartitionMessageCountInfo;
+import org.apache.atlas.odf.api.engine.KafkaPartitionInfo;
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.protocol.SecurityProtocol;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
+import org.apache.atlas.odf.json.JSONUtils;
+
+import kafka.admin.AdminClient;
+import kafka.admin.AdminClient.ConsumerSummary;
+import kafka.api.FetchRequest;
+import kafka.api.FetchRequestBuilder;
+import kafka.api.GroupCoordinatorRequest;
+import kafka.api.GroupCoordinatorResponse;
+import kafka.api.OffsetRequest;
+import kafka.cluster.Broker;
+import kafka.cluster.BrokerEndPoint;
+import kafka.cluster.EndPoint;
+import kafka.common.ErrorMapping;
+import kafka.common.OffsetAndMetadata;
+import kafka.common.OffsetMetadata;
+import kafka.common.OffsetMetadataAndError;
+import kafka.common.TopicAndPartition;
+import kafka.coordinator.GroupOverview;
+import kafka.javaapi.FetchResponse;
+import kafka.javaapi.OffsetCommitRequest;
+import kafka.javaapi.OffsetCommitResponse;
+import kafka.javaapi.OffsetFetchRequest;
+import kafka.javaapi.OffsetFetchResponse;
+import kafka.javaapi.PartitionMetadata;
+import kafka.javaapi.TopicMetadata;
+import kafka.javaapi.TopicMetadataRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.message.MessageAndOffset;
+import kafka.network.BlockingChannel;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import scala.collection.JavaConversions;
+import scala.collection.Seq;
+
+public class KafkaMonitor {
+	private final static String CLIENT_ID = "odfMonitorClient";
+
+	private Logger logger = Logger.getLogger(KafkaMonitor.class.getName());
+
+	//This only works for consumer groups managed by the Kafka coordinator (unlike with Kafka < 0.9, where consumers were managed by Zookeeper).
+	public List<String> getConsumerGroups(String zookeeperHost, String topic) {
+		List<String> result = new ArrayList<String>();
+		try {
+			List<String> brokers = getBrokers(zookeeperHost);
+			StringBuilder brokersParam = new StringBuilder();
+			final Iterator<String> iterator = brokers.iterator();
+			while (iterator.hasNext()) {
+				brokersParam.append(iterator.next());
+				if (iterator.hasNext()) {
+					brokersParam.append(";");
+				}
+			}
+			Properties props = new Properties();
+			props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokersParam.toString());
+			final AdminClient client = AdminClient.create(props);
+			final Map<Node, scala.collection.immutable.List<GroupOverview>> javaMap = JavaConversions.mapAsJavaMap(client.listAllConsumerGroups());
+			for (Entry<Node, scala.collection.immutable.List<GroupOverview>> entry : javaMap.entrySet()) {
+				for (GroupOverview group : JavaConversions.seqAsJavaList(entry.getValue())) {
+					for (ConsumerSummary summary : JavaConversions.seqAsJavaList(client.describeConsumerGroup(group.groupId()))) {
+						for (TopicPartition part : JavaConversions.seqAsJavaList(summary.assignment())) {
+							if (part.topic().equals(topic) && !result.contains(group.groupId())) {
+								result.add(group.groupId());
+								break;
+							}
+						}
+					}
+				}
+			}
+		} catch (Exception ex) {
+			logger.log(Level.WARNING, "An error occurred retrieving the consumer groups", ex);
+			ex.printStackTrace();
+		}
+		return result;
+	}
+
+	private ZkUtils getZkUtils(String zookeeperHost, ZkClient zkClient) {
+		return new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false);
+	}
+
+	private ZkClient getZkClient(String zookeeperHost) {
+		return new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
+	}
+
+	public boolean setOffset(String zookeeperHost, String consumerGroup, String topic, int partition, long offset) {
+		logger.info("set offset for " + consumerGroup + " " + offset);
+		long now = System.currentTimeMillis();
+		Map<TopicAndPartition, OffsetAndMetadata> offsets = new LinkedHashMap<TopicAndPartition, OffsetAndMetadata>();
+		final TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
+		offsets.put(topicAndPartition, new OffsetAndMetadata(new OffsetMetadata(offset, "Manually set offset"), now, -1));
+		int correlationId = 0;
+		OffsetCommitRequest req = new OffsetCommitRequest(consumerGroup, offsets, correlationId++, CLIENT_ID, (short) 1);
+		final BlockingChannel channel = getOffsetManagerChannel(zookeeperHost, consumerGroup);
+		channel.send(req.underlying());
+		OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().payload());
+		if (commitResponse.hasError()) {
+			logger.warning("Could not commit offset! " + topic + ":" + partition + "-" + offset + " error: " + commitResponse.errorCode(topicAndPartition));
+			channel.disconnect();
+			return false;
+		} else {
+			logger.info("offset commit successfully");
+			channel.disconnect();
+			return true;
+		}
+	}
+
+	public List<String> getBrokers(String zookeeperHost) {
+		List<String> result = new ArrayList<String>();
+		ZkClient zkClient = getZkClient(zookeeperHost);
+		List<Broker> brokerList = JavaConversions.seqAsJavaList(getZkUtils(zookeeperHost, zkClient).getAllBrokersInCluster());
+		Iterator<Broker> brokerIterator = brokerList.iterator();
+		while (brokerIterator.hasNext()) {
+			for (Entry<SecurityProtocol, EndPoint> entry : JavaConversions.mapAsJavaMap(brokerIterator.next().endPoints()).entrySet()) {
+				String connectionString = entry.getValue().connectionString();
+				//remove protocol from string
+				connectionString = connectionString.split("://")[1];
+				result.add(connectionString);
+			}
+		}
+		zkClient.close();
+		return result;
+	}
+
+	public PartitionOffsetInfo getOffsetsOfLastMessagesForTopic(String zookeeperHost, String topic, int partition) {
+		List<String> kafkaBrokers = getBrokers(zookeeperHost);
+		return getOffsetsOfLastMessagesForTopic(kafkaBrokers, topic, partition);
+	}
+
+	public PartitionOffsetInfo getOffsetsOfLastMessagesForTopic(final List<String> kafkaBrokers, final String topic, final int partition) {
+		logger.entering(this.getClass().getName(), "getOffsetsOfLastMessagesForTopic");
+
+		final PartitionOffsetInfo info = new PartitionOffsetInfo();
+		info.setOffset(-1l);
+		info.setPartitionId(partition);
+
+		final CountDownLatch subscribeAndPollLatch = new CountDownLatch(2);
+
+		final Thread consumerThread = new Thread(new Runnable() {
+			@Override
+			public void run() {
+				Properties kafkaConsumerProps = getKafkaConsumerProps(kafkaBrokers);
+				final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(kafkaConsumerProps);
+				final TopicPartition topicPartition = new TopicPartition(topic, partition);
+				consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() {
+
+					@Override
+					public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+						// TODO Auto-generated method stub
+
+					}
+
+					@Override
+					public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
+						subscribeAndPollLatch.countDown();
+					}
+				});
+				logger.info("poll records from kafka for offset retrieval");
+
+				final ConsumerRecords<String, String> poll = consumer.poll(500);
+				List<ConsumerRecord<String, String>> polledRecords = poll.records(topicPartition);
+				logger.info("polled records: " + poll.count());
+				if (!polledRecords.isEmpty()) {
+					ConsumerRecord<String, String> record = polledRecords.get(polledRecords.size() - 1);
+					info.setMessage(record.value());
+					info.setOffset(record.offset());
+					info.setPartitionId(partition);
+					logger.info("polled last offset: " + record.offset());
+				}
+				subscribeAndPollLatch.countDown();
+				consumer.close();
+			}
+		});
+		logger.info("start retrieval of offset");
+		consumerThread.start();
+
+		try {
+			boolean result = subscribeAndPollLatch.await(5000, TimeUnit.MILLISECONDS);
+			if (result) {
+				logger.info("Subscribed and retrieved offset on time: " + JSONUtils.toJSON(info));
+			} else {
+				logger.warning("Could not subscribe and retrieve offset on time " + JSONUtils.toJSON(info));
+				consumerThread.interrupt();
+			}
+		} catch (InterruptedException e) {
+			e.printStackTrace();
+			logger.log(Level.WARNING, "An error occurred retrieving the last retrieved offset", e);
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.log(Level.WARNING, "An error occurred retrieving the last retrieved offset", e);
+		}
+
+		return info;
+	}
+
+	protected Properties getKafkaConsumerProps(List<String> kafkaBrokers) {
+		Properties kafkaConsumerProps = new Properties();
+		kafkaConsumerProps.put("group.id", "OffsetRetrieverConsumer" + UUID.randomUUID().toString());
+		kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+
+		StringBuilder brokers = new StringBuilder();
+		final Iterator<String> iterator = kafkaBrokers.iterator();
+		while (iterator.hasNext()) {
+			brokers.append(iterator.next());
+			if (iterator.hasNext()) {
+				brokers.append(",");
+			}
+		}
+		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers.toString());
+		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+		return kafkaConsumerProps;
+	}
+
+	public List<KafkaBrokerPartitionMessageCountInfo> getMessageCountForTopic(String zookeeperHost, String topic) {
+		logger.entering(this.getClass().getName(), "getMessageCountForTopic");
+		List<KafkaBrokerPartitionMessageCountInfo> result = new ArrayList<KafkaBrokerPartitionMessageCountInfo>();
+
+		List<Integer> partitions = getPartitionIdsForTopic(zookeeperHost, topic);
+
+		List<String> kafkaBrokers = getBrokers(zookeeperHost);
+		for (int cnt = 0; cnt < kafkaBrokers.size(); cnt++) {
+			String broker = kafkaBrokers.get(cnt);
+			logger.info("getMessageCountForTopic from broker: " + broker);
+			KafkaBrokerPartitionMessageCountInfo container = new KafkaBrokerPartitionMessageCountInfo();
+			container.setBroker(broker);
+
+			String[] splitBroker = broker.split(":");
+			String host = splitBroker[0];
+			String port = splitBroker[1];
+			SimpleConsumer consumer = new SimpleConsumer(host, Integer.valueOf(port), 100000, 64 * 1024, "leaderLookup");
+			Map<Integer, Long> partitionCountMap = new HashMap<Integer, Long>();
+
+			for (Integer partition : partitions) {
+				logger.info("broker: " + broker + ", partition " + partition);
+				partitionCountMap.put(partition, null);
+				FetchRequest req = new FetchRequestBuilder().clientId(CLIENT_ID).addFetch(topic, partition, 0, 100000).build();
+				FetchResponse fetchResponse = consumer.fetch(req);
+
+				if (fetchResponse.hasError()) {
+					//in case of a broker error, do nothing. The broker has no information about the partition so we continue with the next one.
+					if (fetchResponse.errorCode(topic, partition) == ErrorMapping.NotLeaderForPartitionCode()) {
+						logger.info("broker " + broker + " is not leader for partition " + partition + ", cannot retrieve MessageCountForTopic");
+					} else {
+						logger.warning("broker: " + broker + ", partition " + partition + " has error: " + fetchResponse.errorCode(topic, partition));
+					}
+					continue;
+				}
+
+				long numRead = 0;
+				long readOffset = numRead;
+
+				for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
+					long currentOffset = messageAndOffset.offset();
+					if (currentOffset < readOffset) {
+						logger.info("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
+						continue;
+					}
+					readOffset = messageAndOffset.nextOffset();
+					numRead++;
+				}
+
+				logger.info("broker: " + broker + ", partition " + partition + " total messages: " + numRead);
+				partitionCountMap.put(partition, numRead);
+			}
+			consumer.close();
+			container.setPartitionMsgCountMap(partitionCountMap);
+			result.add(container);
+		}
+
+		return result;
+	}
+
+	/**
+	 * @param group
+	 * @param topic
+	 * @return a list of partitions and their offsets. If no offset is found, it is returned as -1
+	 */
+	public List<PartitionOffsetInfo> getOffsetsForTopic(String zookeeperHost, String group, String topic) {
+		BlockingChannel channel = getOffsetManagerChannel(zookeeperHost, group);
+
+		List<Integer> partitionIds = getPartitionIdsForTopic(zookeeperHost, topic);
+		List<TopicAndPartition> partitions = new ArrayList<TopicAndPartition>();
+		int correlationId = 0;
+		for (Integer id : partitionIds) {
+			TopicAndPartition testPartition0 = new TopicAndPartition(topic, id);
+			partitions.add(testPartition0);
+		}
+
+		OffsetFetchRequest fetchRequest = new OffsetFetchRequest(group, partitions, (short) 1 /* version */, // version 1 and above fetch from Kafka, version 0 fetches from ZooKeeper
+				correlationId++, CLIENT_ID);
+
+		List<PartitionOffsetInfo> offsetResult = new ArrayList<PartitionOffsetInfo>();
+		int retryCount = 0;
+		//it is possible that a ConsumerCoordinator is not available yet, if this is the case we need to wait and try again.
+		boolean done = false;
+		while (retryCount < 5 && !done) {
+			offsetResult = new ArrayList<PartitionOffsetInfo>();
+			retryCount++;
+			channel.send(fetchRequest.underlying());
+			OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
+
+			boolean errorFound = false;
+			for (TopicAndPartition part : partitions) {
+				if (part.topic().equals(topic)) {
+					PartitionOffsetInfo offsetInfo = new PartitionOffsetInfo();
+					offsetInfo.setPartitionId(part.partition());
+					OffsetMetadataAndError result = fetchResponse.offsets().get(part);
+					short offsetFetchErrorCode = result.error();
+					if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
+						channel.disconnect();
+						String msg = "Offset could not be fetched, the used broker is not the coordinator for this consumer";
+						offsetInfo.setMessage(msg);
+						logger.warning(msg);
+						errorFound = true;
+						break;
+					} else if (offsetFetchErrorCode == ErrorMapping.OffsetsLoadInProgressCode()) {
+						logger.warning("Offset could not be fetched at this point, the offsets are not available yet");
+						try {
+							Thread.sleep(2000);
+						} catch (InterruptedException e) {
+							e.printStackTrace();
+						}
+						//Offsets are not available yet. Wait and try again
+						errorFound = true;
+						break;
+					} else if (result.error() != ErrorMapping.NoError()) {
+						String msg = MessageFormat.format("Offset could not be fetched at this point, an unknown error occurred ( {0} )", result.error());
+						offsetInfo.setMessage(msg);
+						logger.warning(msg);
+					} else {
+						long offset = result.offset();
+						offsetInfo.setOffset(offset);
+					}
+
+					offsetResult.add(offsetInfo);
+				}
+			}
+			if (!errorFound) {
+				done = true;
+			}
+		}
+
+		if (channel.isConnected()) {
+			channel.disconnect();
+		}
+		return offsetResult;
+	}
+
+	public List<TopicMetadata> getMetadataForTopic(String zookeeperHost, String kafkaTopic) {
+		//connecting to a single broker should be enough because every single broker knows everything we need
+		for (String brokerHost : getBrokers(zookeeperHost)) {
+			brokerHost = brokerHost.replace("PLAINTEXT://", "");
+			String[] splitBroker = brokerHost.split(":");
+			String ip = splitBroker[0];
+			String port = splitBroker[1];
+
+			//it is possible that a ConsumerCoordinator is not available yet, if this is the case we need to wait and try again.
+			SimpleConsumer consumer = null;
+			try {
+				consumer = new SimpleConsumer(ip, Integer.valueOf(port), 100000, 64 * 1024, "leaderLookup");
+				int retryCount = 0;
+				boolean done = false;
+				while (retryCount < 5 && !done) {
+					retryCount++;
+
+					List<String> topics = Collections.singletonList(kafkaTopic);
+					TopicMetadataRequest req = new TopicMetadataRequest(topics);
+					kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
+					List<TopicMetadata> metaData = resp.topicsMetadata();
+
+					boolean errorFound = false;
+					for (TopicMetadata item : metaData) {
+						if (item.topic().equals(kafkaTopic)) {
+							if (item.errorCode() == ErrorMapping.LeaderNotAvailableCode()) {
+								//wait and try again
+								errorFound = true;
+								try {
+									Thread.sleep(2000);
+								} catch (InterruptedException e) {
+									e.printStackTrace();
+								}
+								break;
+							}
+							return metaData;
+						}
+					}
+
+					if (!errorFound) {
+						done = true;
+					}
+				}
+			} finally {
+				if (consumer != null) {
+					consumer.close();
+				}
+			}
+		}
+		return null;
+	}
+
+	public List<Integer> getPartitionsForTopic(String zookeeperHost, String topic) {
+		ZkClient zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
+		Map<String, Seq<Object>> partitions = JavaConversions
+				.mapAsJavaMap(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false).getPartitionsForTopics(JavaConversions.asScalaBuffer(Arrays.asList(topic)).toList()));
+		List<Object> partitionObjList = JavaConversions.seqAsJavaList(partitions.entrySet().iterator().next().getValue());
+		List<Integer> partitionsList = new ArrayList<Integer>();
+		for (Object partObj : partitionObjList) {
+			partitionsList.add((Integer) partObj);
+		}
+		zkClient.close();
+		return partitionsList;
+	}
+
+	public List<KafkaPartitionInfo> getPartitionInfoForTopic(String zookeeperHost, String topic) {
+		List<TopicMetadata> topicInfos = getMetadataForTopic(zookeeperHost, topic);
+		List<KafkaPartitionInfo> partitionInfoList = new ArrayList<KafkaPartitionInfo>();
+		for (TopicMetadata topicInfo : topicInfos) {
+			for (PartitionMetadata part : topicInfo.partitionsMetadata()) {
+				KafkaPartitionInfo info = new KafkaPartitionInfo();
+				info.setPartitionId(part.partitionId());
+
+				List<BrokerNode> partitionNodes = new ArrayList<BrokerNode>();
+				for (BrokerEndPoint brokerPoint : part.isr()) {
+					BrokerNode node = new BrokerNode();
+					node.setHost(brokerPoint.connectionString());
+					node.setLeader(brokerPoint.connectionString().equals(part.leader().connectionString()));
+					partitionNodes.add(node);
+				}
+				info.setNodes(partitionNodes);
+				partitionInfoList.add(info);
+			}
+		}
+		//partitionInformation is collected, end loop and return
+		return partitionInfoList;
+	}
+
+	public List<Integer> getPartitionIdsForTopic(String zookeeperHost, String topic) {
+		List<TopicMetadata> metadata = getMetadataForTopic(zookeeperHost, topic);
+
+		List<Integer> partitionsList = new ArrayList<Integer>();
+		if (metadata != null && !metadata.isEmpty()) {
+			for (PartitionMetadata partData : metadata.get(0).partitionsMetadata()) {
+				partitionsList.add(partData.partitionId());
+			}
+		}
+
+		return partitionsList;
+	}
+
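+	/**
+	 * Locate the offset manager (group coordinator) for a consumer group and return a
+	 * connected BlockingChannel to it. Each broker is queried in turn; if the
+	 * coordinator is not available yet, the request is retried up to five times.
+	 */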
+	private BlockingChannel getOffsetManagerChannel(String zookeeperHost, String group) {
+		int correlationId = 0;
+		for (String broker : getBrokers(zookeeperHost)) {
+			String[] splitBroker = broker.split(":");
+			String ip = splitBroker[0];
+			String port = splitBroker[1];
+
+			int retryCount = 0;
+			// It is possible that a ConsumerCoordinator is not available yet; if so, wait and try again.
+			while (retryCount < 5) {
+				retryCount++;
+
+				BlockingChannel channel = new BlockingChannel(ip, Integer.valueOf(port), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(),
+						5000 /* read timeout in millis */);
+				channel.connect();
+				channel.send(new GroupCoordinatorRequest(group, OffsetRequest.CurrentVersion(), correlationId++, CLIENT_ID));
+				GroupCoordinatorResponse metadataResponse = GroupCoordinatorResponse.readFrom(channel.receive().payload());
+
+				if (metadataResponse.errorCode() == ErrorMapping.NoError()) {
+					BrokerEndPoint endPoint = metadataResponse.coordinatorOpt().get();
+					// reconnect if the coordinator is a different broker than the one we queried
+					if (!endPoint.host().equals(ip) || endPoint.port() != Integer.parseInt(port)) {
+						channel.disconnect();
+						channel = new BlockingChannel(endPoint.host(), endPoint.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
+						channel.connect();
+					}
+					return channel;
+				} else if (metadataResponse.errorCode() == ErrorMapping.ConsumerCoordinatorNotAvailableCode()
+						|| metadataResponse.errorCode() == ErrorMapping.OffsetsLoadInProgressCode()) {
+					//wait and try again
+					try {
+						Thread.sleep(2000);
+					} catch (InterruptedException e) {
+						// restore the interrupt flag instead of swallowing the exception
+						Thread.currentThread().interrupt();
+					}
+				} else {
+					//unknown error, continue with next broker
+					break;
+				}
+			}
+		}
+		throw new RuntimeException("Kafka Consumer Broker not available!");
+	}
+}
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java
new file mode 100755
index 0000000..33c4ae0
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaProducerManager.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.util.Iterator;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.kafka.clients.producer.Callback;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.errors.TimeoutException;
+
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.MessageEncryption;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+
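+/**
+ * Creates and caches a single, process-wide KafkaProducer and offers a simple
+ * retrying send. Minimal usage sketch (topic, key and payload are placeholders):
+ *
+ * <pre>
+ * KafkaProducerManager pm = new ODFInternalFactory().create(KafkaProducerManager.class);
+ * pm.sendMsg("my-topic", "my-key", jsonPayload);
+ * </pre>
+ */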
+public class KafkaProducerManager {
+
+	private final static Logger logger = Logger.getLogger(KafkaProducerManager.class.getName());
+	private static KafkaProducer<String, String> producer;
+
+	protected Properties getKafkaProducerConfig() {
+		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+		ODFInternalFactory f = new ODFInternalFactory();
+		Properties props = odfConfig.getKafkaProducerProperties();
+		String zookeeperConnect = f.create(Environment.class).getZookeeperConnectString();
+		final Iterator<String> brokers = f.create(KafkaMonitor.class).getBrokers(zookeeperConnect).iterator();
+		StringBuilder brokersString = new StringBuilder();
+		while (brokers.hasNext()) {
+			brokersString.append(brokers.next());
+			if (brokers.hasNext()) {
+				brokersString.append(",");
+			}
+		}
+		logger.info("Sending messages to brokers: " + brokersString.toString());
+		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+		props.put(ProducerConfig.CLIENT_ID_CONFIG, "ODF_MESSAGE_PRODUCER");
+		return props;
+	}
+
+	private KafkaProducer<String, String> getProducer() {
+		// the producer is a static, process-wide instance; guard its lazy creation
+		synchronized (KafkaProducerManager.class) {
+			if (producer == null) {
+				producer = new KafkaProducer<String, String>(getKafkaProducerConfig());
+			}
+			return producer;
+		}
+	}
+
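+	/**
+	 * Encrypt the message value and send it without a completion callback.
+	 */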
+	public void sendMsg(String topicName, String key, String value) {
+		MessageEncryption msgEncryption = new ODFInternalFactory().create(MessageEncryption.class);
+		value = msgEncryption.encrypt(value);
+		sendMsg(topicName, key, value, null);
+	}
+
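+	/**
+	 * Send a message and wait up to four seconds for the acknowledgement. Timeouts are
+	 * retried up to five times; any other failure closes the shared producer and is
+	 * rethrown as a RuntimeException.
+	 */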
+	public void sendMsg(final String topicName, final String key, final String value, final Callback callback) {
+		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, key, value);
+		try {
+			int retryCount = 0;
+			boolean msgSent = false;
+			while (retryCount < 5 && !msgSent) {
+				try {
+					getProducer().send(producerRecord, callback).get(4000, TimeUnit.MILLISECONDS);
+					msgSent = true;
+				} catch (ExecutionException ex) {
+					if (ex.getCause() instanceof TimeoutException) {
+						logger.warning("Message could not be sent within 4000 ms");
+						retryCount++;
+					} else {
+						throw ex;
+					}
+				}
+			}
+			if (retryCount == 5) {
+				logger.warning("Message could not be sent within 5 retries!");
+				logger.fine("topic: " + topicName + " key " + key + " msg " + value);
+			}
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "Exception while sending message", exc);
+			if (producer != null) {
+				producer.close();
+			}
+			producer = null;
+			throw new RuntimeException(exc);
+		}
+	}
+
+}
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java
new file mode 100755
index 0000000..d0cf704
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueConsumer.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.WakeupException;
+
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.messaging.MessageEncryption;
+
+import kafka.consumer.ConsumerTimeoutException;
+
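+/**
+ * Consumes messages from a single Kafka topic, decrypts each message and hands it
+ * to a QueueMessageProcessor. A failing message is attempted at most
+ * MAX_PROCESSING_EXCEPTIONS times; consumption itself is restarted up to
+ * MAX_CONSUMPTION_EXCEPTIONS times. Illustrative use (the group, topic and thread
+ * names are placeholders):
+ *
+ * <pre>
+ * Properties props = new KafkaQueueManager().getConsumerConfigProperties("my-group", false);
+ * KafkaQueueConsumer consumer = new KafkaQueueConsumer("my-topic", props, processor);
+ * threadManager.startUnmanagedThread("MyConsumerThread", consumer);
+ * </pre>
+ */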
+public class KafkaQueueConsumer implements ODFRunnable {
+	private Logger logger = Logger.getLogger(KafkaQueueConsumer.class.getName());
+	final static int POLLING_DURATION_MS = 100;
+	public static final int MAX_PROCESSING_EXCEPTIONS = 3;
+	public final static int MAX_CONSUMPTION_EXCEPTIONS = 5;
+	
+	public static interface ConsumptionCallback {
+		boolean stopConsumption();
+	}
+
+	private boolean ready = false;
+
+	private String topic;
+	private KafkaConsumer<String, String> kafkaConsumer;
+	private Properties config;
+	private boolean isShutdown = false;
+	private ExecutorService executorService;
+	private QueueMessageProcessor requestConsumer;
+	private int consumptionExceptionCount = 0;
+	private ConsumptionCallback consumptionCallback;
+
+	public KafkaQueueConsumer(String topicName, Properties config, QueueMessageProcessor requestConsumer) {
+		this(topicName, config, requestConsumer, null);
+	}
+	
+	public KafkaQueueConsumer(String topicName, Properties config, QueueMessageProcessor requestConsumer, ConsumptionCallback consumptionCallback) {
+		this.topic = topicName;
+		this.config = config;
+		this.requestConsumer = requestConsumer;
+		this.consumptionCallback = consumptionCallback;
+		if (this.consumptionCallback == null) {
+			this.consumptionCallback = new ConsumptionCallback() {
+
+				@Override
+				public boolean stopConsumption() {
+					// default: never stop
+					return false;
+				}
+				
+			};
+		}
+	}
+
+	@Override
+	public void run() {
+		final String groupId = this.config.getProperty("group.id");
+		while (consumptionExceptionCount < MAX_CONSUMPTION_EXCEPTIONS && !isShutdown) {
+			try {
+				logger.info("Starting consumption for " + groupId);
+				startConsumption();
+			} catch (RuntimeException ex) {
+				if (ex.getCause() instanceof WakeupException) {
+					isShutdown = true;
+				} else {
+					consumptionExceptionCount++;
+					logger.log(Level.WARNING, "Caught exception in KafkaQueueConsumer " + groupId + ", restarting consumption!", ex);
+				}
+				if (this.kafkaConsumer != null) {
+					this.kafkaConsumer.close();
+					this.kafkaConsumer = null;
+				}
+			} catch (Exception e) {
+				consumptionExceptionCount++;
+				logger.log(Level.WARNING, "Caught exception in KafkaQueueConsumer " + groupId + ", restarting consumption!", e);
+				if (this.kafkaConsumer != null) {
+					this.kafkaConsumer.close();
+					this.kafkaConsumer = null;
+				}
+			}
+		}
+		logger.info("Enough consumption for " + groupId);
+		this.ready = false;
+		this.cancel();
+	}
+
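+	/**
+	 * Subscribe and poll until shutdown. Offsets are committed right after each poll
+	 * so that long-running processors do not run into commit timeouts; a message
+	 * whose processing keeps failing is dropped after MAX_PROCESSING_EXCEPTIONS
+	 * attempts.
+	 */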
+	private void startConsumption() {
+		if (this.consumptionCallback.stopConsumption()) {
+			return;
+		}
+		Exception caughtException = null;
+		final String logPrefix = this + " consumer: [" + this.requestConsumer.getClass().getSimpleName() + "], on " + topic + ": ";
+		try {
+			if (this.kafkaConsumer == null) {
+				logger.fine(logPrefix + " create new consumer for topic " + topic);
+				try {
+					this.kafkaConsumer = new KafkaConsumer<String, String>(config);
+					kafkaConsumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() {
+
+						@Override
+						public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+							logger.fine(logPrefix + " partitions revoked " + topic + " new partitions: " + partitions.size());
+						}
+
+						@Override
+						public void onPartitionsAssigned(Collection<TopicPartition> partitions) {						
+							logger.finer(logPrefix + " partitions assigned " + topic + " , new partitions: " + partitions.size());
+							logger.info(logPrefix + "consumer is ready with " + partitions.size() + " partitions assigned");
+							ready = true;
+						}
+					});
+				} catch (ZkTimeoutException zkte) {
+					String zkHosts = config.getProperty("zookeeper.connect");
+					logger.log(Level.SEVERE, logPrefix + " Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
+					throw zkte;
+				}
+			}
+			logger.log(Level.INFO, logPrefix + " Consumer " + "''{1}'' is now listening on ODF queue ''{0}'' with configuration {2}", new Object[] { topic, requestConsumer, config });
+			MessageEncryption msgEncryption = new ODFInternalFactory().create(MessageEncryption.class);
+			while (!Thread.interrupted() && !isShutdown && kafkaConsumer != null) {
+				if (this.consumptionCallback.stopConsumption()) {
+					isShutdown = true;
+					break;
+				}
+				ConsumerRecords<String, String> records = kafkaConsumer.poll(POLLING_DURATION_MS);
+				kafkaConsumer.commitSync(); // commit offset immediately to avoid timeouts for long running processors
+				for (TopicPartition partition : kafkaConsumer.assignment()) {
+					List<ConsumerRecord<String, String>> polledRecords = records.records(partition);
+					if (!polledRecords.isEmpty()) {
+						logger.fine(polledRecords.get(0).value() + " offset: " + polledRecords.get(0).offset());
+					}
+
+					for (int no = 0; no < polledRecords.size(); no++) {
+						ConsumerRecord<String, String> record = polledRecords.get(no);
+						String s = record.value();
+						logger.log(Level.FINEST, logPrefix + "Decrypting message {0}", s);
+						try {
+							s = msgEncryption.decrypt(s);
+						} catch (Exception exc) {
+							logger.log(Level.WARNING, "Message could not be decrypted, ignoring it", exc);
+							s = null;
+						}
+						if (s != null) {
+							logger.log(Level.FINEST, logPrefix + "Sending message to consumer ''{0}''", s);
+							int exceptionCount = 0;
+							boolean processedSuccessfully = false;
+							while (exceptionCount < MAX_PROCESSING_EXCEPTIONS && !processedSuccessfully) {
+								try {
+									exceptionCount++;
+									this.requestConsumer.process(executorService, s, record.partition(), record.offset());
+									processedSuccessfully = true;
+								} catch (Exception ex) {
+									// log the cause; the message is retried until MAX_PROCESSING_EXCEPTIONS is reached
+									logger.log(Level.WARNING, "Exception " + exceptionCount + " caught processing message!", ex);
+								}
+							}
+						}
+					}
+				}
+			}
+		} catch (ConsumerTimeoutException e) {
+			String msg = MessageFormat.format(" Caught timeout on queue ''{0}''", topic);
+			logger.log(Level.WARNING, logPrefix + msg, e);
+			caughtException = e;
+		} catch (Exception exc) {
+			String msg = MessageFormat.format(" Caught exception on queue ''{0}''", topic);
+			logger.log(Level.WARNING, logPrefix + msg, exc);
+			caughtException = exc;
+		} finally {
+			if (kafkaConsumer != null) {
+				logger.log(Level.FINE, logPrefix + "Closing consumer " + " on topic ''{0}''", topic);
+				kafkaConsumer.close();
+				logger.log(Level.FINE, logPrefix + "Closed consumer " + " on topic ''{0}''", topic);
+				kafkaConsumer = null;
+			}
+		}
+		logger.log(Level.INFO, logPrefix + "Finished consumer on topic ''{0}''", topic);
+		if (caughtException != null) {
+			caughtException.printStackTrace();
+			throw new RuntimeException(caughtException);
+		}
+	}
+
+	public void cancel() {
+		logger.log(Level.INFO, "Shutting down consumer on topic ''{0}''", topic);
+		if (this.kafkaConsumer != null) {
+			this.kafkaConsumer.wakeup();
+		}
+		isShutdown = true;
+	}
+
+	public boolean isShutdown() {
+		return isShutdown;
+	}
+
+	@Override
+	public void setExecutorService(ExecutorService service) {
+		this.executorService = service;
+	}
+
+	@Override
+	public boolean isReady() {
+		return ready;
+	}
+
+}
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java
new file mode 100755
index 0000000..e759ecc
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaQueueManager.java
@@ -0,0 +1,488 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.atlas.odf.api.OpenDiscoveryFramework;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.engine.KafkaGroupOffsetInfo;
+import org.apache.atlas.odf.api.engine.KafkaStatus;
+import org.apache.atlas.odf.api.engine.KafkaTopicStatus;
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager;
+import org.apache.atlas.odf.core.notification.NotificationListener;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.engine.MessagingStatus;
+import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.controlcenter.AdminMessage;
+import org.apache.atlas.odf.core.controlcenter.AdminQueueProcessor;
+import org.apache.atlas.odf.core.controlcenter.ConfigChangeQueueProcessor;
+import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore.StatusQueueProcessor;
+import org.apache.atlas.odf.core.controlcenter.DiscoveryServiceStarter;
+import org.apache.atlas.odf.core.controlcenter.ExecutorServiceFactory;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntimes;
+import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
+import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
+import org.apache.atlas.odf.core.notification.NotificationManager;
+
+import kafka.admin.AdminUtils;
+import kafka.admin.RackAwareMode;
+import kafka.common.TopicExistsException;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+
+public class KafkaQueueManager implements DiscoveryServiceQueueManager {
+
+	public static final String TOPIC_NAME_STATUS_QUEUE = "odf-status-topic";
+	public static final String TOPIC_NAME_ADMIN_QUEUE = "odf-admin-topic";
+	public static final String ADMIN_QUEUE_KEY = "odf-admin-queue-key";
+	public static final String SERVICE_TOPIC_PREFIX = "odf-topic-";
+
+	public static final RackAwareMode DEFAULT_RACK_AWARE_MODE = RackAwareMode.Disabled$.MODULE$;
+	
+	//use static UUID so that no unnecessary consumer threads are started
+	private final static String UNIQUE_SESSION_THREAD_ID = UUID.randomUUID().toString();
+
+	private final static int THREAD_STARTUP_TIMEOUT_MS = 5000;
+	
+	private static List<String> queueConsumerNames = null;
+	private static final Object startLock = new Object();
+
+	private final static Logger logger = Logger.getLogger(KafkaQueueManager.class.getName());
+
+	private ThreadManager threadManager;
+	private SettingsManager odfConfig;
+	private String zookeeperConnectString;
+
+	public KafkaQueueManager() {
+		ODFInternalFactory factory = new ODFInternalFactory();
+		threadManager = factory.create(ThreadManager.class);
+		ExecutorServiceFactory esf = factory.create(ExecutorServiceFactory.class);
+		threadManager.setExecutorService(esf.createExecutorService());
+		zookeeperConnectString = factory.create(Environment.class).getZookeeperConnectString();
+		odfConfig = factory.create(SettingsManager.class);
+	}
+	
+	
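+	/**
+	 * Build the Kafka consumer configuration for the given group id. consumeFromEnd
+	 * controls auto.offset.reset: "latest" for consumers that only need new messages
+	 * (e.g. the admin queue), "earliest" for consumers that must replay history
+	 * (e.g. the status queue).
+	 */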
+	public Properties getConsumerConfigProperties(String consumerGroupID, boolean consumeFromEnd) {
+		Properties kafkaConsumerProps = odfConfig.getKafkaConsumerProperties();
+		kafkaConsumerProps.put("group.id", consumerGroupID);
+		if (zookeeperConnectString != null) {
+			kafkaConsumerProps.put("zookeeper.connect", zookeeperConnectString);
+		}
+		if (consumeFromEnd) {
+			kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
+		} else {
+			kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+		}
+		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());
+		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);		
+		return kafkaConsumerProps;
+	}
+
+	private String getBootstrapServers() {
+		final List<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnectString);
+		StringBuilder servers = new StringBuilder();
+		final Iterator<String> iterator = brokers.iterator();
+		while(iterator.hasNext()){
+			servers.append(iterator.next());
+			if(iterator.hasNext()){
+				servers.append(",");
+			}
+		}
+		return servers.toString();
+	}
+
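+	/**
+	 * Create the topic via AdminUtils if it does not exist yet. After creating it,
+	 * wait briefly so the topic is visible before any consumer is started on it.
+	 */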
+	protected void createTopicIfNotExists(String topicName, int partitionCount, Properties props) {
+		String zkHosts = props.getProperty("zookeeper.connect");
+		ZkClient zkClient = null;
+		try {
+			zkClient = new ZkClient(zkHosts, Integer.valueOf(props.getProperty("zookeeperSessionTimeoutMs")),
+					Integer.valueOf(props.getProperty("zookeeperConnectionTimeoutMs")), ZKStringSerializer$.MODULE$);
+		} catch (ZkTimeoutException zkte) {
+			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
+		}
+		try {
+			logger.log(Level.FINEST, "Checking if topic ''{0}'' already exists", topicName);
+			// using partition size 1 and replication size 1, no special
+			// per-topic config needed
+			try {
+				final ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts), false);
+				if (!AdminUtils.topicExists(zkUtils, topicName)) {
+					logger.log(Level.INFO, "Topic ''{0}'' does not exist, creating it", topicName);
+
+					//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
+					KafkaMessagingConfiguration kafkaConfig = ((KafkaMessagingConfiguration) odfConfig.getODFSettings().getMessagingConfiguration());
+					AdminUtils.createTopic(zkUtils, topicName, partitionCount, kafkaConfig.getKafkaBrokerTopicReplication(),
+							new Properties(), DEFAULT_RACK_AWARE_MODE);
+					logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
+					//wait before continuing to make sure the topic exists BEFORE consumers are started
+					try {
+						Thread.sleep(1500);
+					} catch (InterruptedException e) {
+						// restore the interrupt flag instead of swallowing the exception
+						Thread.currentThread().interrupt();
+					}
+				}
+			} catch (TopicExistsException ex) {
+				logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
+			}
+		} finally {
+			if (zkClient != null) {
+				zkClient.close();
+			}
+		}
+	}
+
+
+	private String getTopicName(ServiceRuntime runtime) {
+		return "odf-runtime-" + runtime.getName();
+	}
+	
+	private String getConsumerGroup(ServiceRuntime runtime) {
+		return getTopicName(runtime) + "_group";
+	}
+	
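+	/**
+	 * Start one consumer per active service runtime, each listening on the runtime's
+	 * own request topic.
+	 */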
+	private List<ThreadStartupResult> scheduleAllRuntimeConsumers() {
+		List<ThreadStartupResult> results = new ArrayList<>();
+		for (ServiceRuntime runtime : ServiceRuntimes.getActiveRuntimes()) {
+			results.addAll(scheduleRuntimeConsumers(runtime));
+		}
+		return results;
+	}
+	
+	private List<ThreadStartupResult> scheduleRuntimeConsumers(ServiceRuntime runtime) {
+		logger.log(Level.FINER, "Create consumers on queue for runtime ''{0}'' if it doesn't already exist", runtime.getName());
+
+		String topicName = getTopicName(runtime);
+		String consumerGroupId = getConsumerGroup(runtime);
+		Properties kafkaConsumerProps = getConsumerConfigProperties(consumerGroupId, false); // read entries from beginning if consumer was never initialized 
+		String threadName = "RuntimeQueueConsumer" + topicName;
+		List<ThreadStartupResult> result = new ArrayList<ThreadStartupResult>();
+		if (threadManager.getStateOfUnmanagedThread(threadName) != ThreadStatus.ThreadState.RUNNING) {
+			createTopicIfNotExists(topicName, 1, kafkaConsumerProps);
+			ThreadStartupResult startupResult = threadManager.startUnmanagedThread(threadName, new KafkaRuntimeConsumer(runtime, topicName, kafkaConsumerProps, new DiscoveryServiceStarter()));
+			result.add(startupResult);		
+		} else {
+			result.add(new ThreadStartupResult(threadName) {
+				@Override
+				public boolean isNewThreadCreated() {
+					return false;
+				}
+
+				@Override
+				public boolean isReady() {
+					return true;
+				}
+			});
+		}
+		return result;
+	}
+
+	
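+	/**
+	 * Start one KafkaQueueConsumer thread per partition; the number of processors
+	 * must equal the partition count so that each partition is handled by exactly
+	 * one thread.
+	 */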
+	private List<ThreadStartupResult> scheduleConsumerThreads(String topicName, int partitionCount, Properties kafkaConsumerProps, String threadName,
+			List<QueueMessageProcessor> processors) {
+		if (processors.size() != partitionCount) {
+			final String msg = "The number of processors must be equal to the partition count in order to support parallel processing";
+			logger.warning(msg);
+			throw new RuntimeException(msg);
+		}
+		createTopicIfNotExists(topicName, partitionCount, kafkaConsumerProps);
+
+		List<ThreadStartupResult> result = new ArrayList<ThreadStartupResult>();
+		for (int no = 0; no < partitionCount; no++) {
+			if (threadManager.getStateOfUnmanagedThread(threadName + "_" + no) != ThreadStatus.ThreadState.RUNNING) {
+				QueueMessageProcessor processor = processors.get(no);
+				ThreadStartupResult created = threadManager.startUnmanagedThread(threadName + "_" + no, new KafkaQueueConsumer(topicName, kafkaConsumerProps, processor));
+				if (created.isNewThreadCreated()) {
+					logger.log(Level.INFO, "Created new consumer thread on topic ''{0}'' with group ID ''{1}'', thread name: ''{2}'', properties: ''{3}''",
+							new Object[] { topicName, kafkaConsumerProps.getProperty("group.id"), threadName + "_" + no, kafkaConsumerProps.toString() });
+				} else {
+					logger.log(Level.FINE, "Consumer thread with thread name: ''{0}'' already exists, doing nothing", new Object[] { threadName + "_" + no });
+				}
+				result.add(created);
+			} else {
+				result.add(new ThreadStartupResult(threadName) {
+					@Override
+					public boolean isNewThreadCreated() {
+						return false;
+					}
+
+					@Override
+					public boolean isReady() {
+						return true;
+					}
+				});
+			}
+		}
+		return result;
+	}
+
+	private ThreadStartupResult scheduleConsumerThread(String topicName, Properties kafkaConsumerProps, String threadName, QueueMessageProcessor processor) {
+		return scheduleConsumerThreads(topicName, 1, kafkaConsumerProps, threadName, Arrays.asList(processor)).get(0);
+	}
+
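+	/**
+	 * Put the tracker on the request topic of the runtime owning the next discovery
+	 * service. The analysis request id is used as the message key, which (with the
+	 * default partitioner) keeps all messages of one request on the same partition.
+	 */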
+	@Override
+	public void enqueue(AnalysisRequestTracker tracker) {
+		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+		if (dsRequest == null) {
+			throw new RuntimeException("Tracker is finished, should not be enqueued");
+		}
+		String dsID = dsRequest.getDiscoveryServiceId();
+		dsRequest.setPutOnRequestQueue(System.currentTimeMillis());
+		ServiceRuntime runtime = ServiceRuntimes.getRuntimeForDiscoveryService(dsID);
+		if (runtime == null) {
+			throw new RuntimeException(MessageFormat.format("Service runtime for service ''{0}'' was not found.", dsID));
+		}
+		enqueueJSONMessage(getTopicName(runtime), tracker, tracker.getRequest().getId());
+	}
+
+	private void enqueueJSONMessage(String topicName, Object jsonObject, String key) {
+		String value = null;
+		try {
+			value = JSONUtils.toJSON(jsonObject);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+		new ODFInternalFactory().create(KafkaProducerManager.class).sendMsg(topicName, key, value);
+	}
+
+	List<ThreadStartupResult> scheduleStatusQueueConsumers() {
+		logger.log(Level.FINER, "Create consumers on status queue if they don't already exist");
+		List<ThreadStartupResult> results = new ArrayList<ThreadStartupResult>();
+
+		// create consumer thread for the status watcher of all trackers
+		String statusWatcherConsumerGroupID = "DSStatusWatcherConsumerGroup" + UNIQUE_SESSION_THREAD_ID; // have a new group id on each node that reads all from the beginning
+		// always read from beginning for the status queue
+		Properties statusWatcherKafkaConsumerProps = getConsumerConfigProperties(statusWatcherConsumerGroupID, false);
+		final String statusWatcherThreadName = "StatusWatcher" + TOPIC_NAME_STATUS_QUEUE; // a fixed name
+		String threadNameWithPartition = statusWatcherThreadName + "_0";
+		final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadNameWithPartition);
+		logger.fine("State of status watcher thread: " + stateOfUnmanagedThread);
+		if (stateOfUnmanagedThread != ThreadStatus.ThreadState.RUNNING) {
+			final ThreadStartupResult scheduleConsumerThread = scheduleConsumerThread(TOPIC_NAME_STATUS_QUEUE, statusWatcherKafkaConsumerProps, statusWatcherThreadName,
+					new StatusQueueProcessor());
+			results.add(scheduleConsumerThread);
+		} else {
+			results.add(new ThreadStartupResult(statusWatcherThreadName) {
+				@Override
+				public boolean isNewThreadCreated() {
+					return false;
+				}
+
+				@Override
+				public boolean isReady() {
+					return true;
+				}
+			});
+		}
+
+		return results;
+	}
+
+
+	@Override
+	public void enqueueInStatusQueue(StatusQueueEntry sqe) {
+		enqueueJSONMessage(TOPIC_NAME_STATUS_QUEUE, sqe, StatusQueueEntry.getRequestId(sqe));
+	}
+
+
+	private List<ThreadStartupResult> scheduleAdminQueueConsumers() {
+		List<ThreadStartupResult> results = new ArrayList<ThreadStartupResult>();
+		//schedule admin queue consumers
+		// consumer group so that every node receives events
+		String adminWatcherConsumerGroupID = "DSAdminQueueConsumerGroup" + UNIQUE_SESSION_THREAD_ID; // have a new group id on each node 
+		Properties adminWatcherKafkaConsumerProps = getConsumerConfigProperties(adminWatcherConsumerGroupID, true);
+		final String adminWatcherThreadName = "AdminWatcher" + TOPIC_NAME_ADMIN_QUEUE;
+		String threadNameWithPartition = adminWatcherThreadName + "_0";
+		if (threadManager.getStateOfUnmanagedThread(threadNameWithPartition) != ThreadStatus.ThreadState.RUNNING) {
+			results.add(scheduleConsumerThread(TOPIC_NAME_ADMIN_QUEUE, adminWatcherKafkaConsumerProps, adminWatcherThreadName, new AdminQueueProcessor()));
+			// consumer group so only one node receives events
+			String distributedAdminConsumerGroup = "DSAdminQueueConsumerGroupCommon";
+			Properties kafkaProps = getConsumerConfigProperties(distributedAdminConsumerGroup, true);
+			final String threadName = "DistributedAdminWatcher";
+			results.add(scheduleConsumerThread(TOPIC_NAME_ADMIN_QUEUE, kafkaProps, threadName, new ConfigChangeQueueProcessor()));
+		} else {
+			results.add(new ThreadStartupResult(adminWatcherThreadName) {
+				@Override
+				public boolean isNewThreadCreated() {
+					return false;
+				}
+
+				@Override
+				public boolean isReady() {
+					return true;
+				}
+			});
+		}
+		return results;
+	}
+
+	@Override
+	public void enqueueInAdminQueue(AdminMessage message) {
+		enqueueJSONMessage(TOPIC_NAME_ADMIN_QUEUE, message, ADMIN_QUEUE_KEY);
+	}
+
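+	/**
+	 * Start all queue consumers exactly once per process (guarded by startLock):
+	 * status queue, admin queue, one consumer per runtime, and the notification
+	 * listener threads. Blocks until all threads report ready or the timeout expires.
+	 */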
+	@Override
+	public void start() throws TimeoutException {
+		synchronized (startLock) {
+			if (queueConsumerNames == null) {
+				List<ThreadStartupResult> results = new ArrayList<>();
+				results.addAll(scheduleStatusQueueConsumers());
+				results.addAll(scheduleAdminQueueConsumers());
+				results.addAll(scheduleAllRuntimeConsumers());
+				results.addAll(scheduleNotificationListenerThreads());
+				List<String> consumerNames = new ArrayList<>();
+				for (ThreadStartupResult tsr : results) {
+					consumerNames.add(tsr.getThreadId());
+				}
+				queueConsumerNames = consumerNames;
+				this.threadManager.waitForThreadsToBeReady(THREAD_STARTUP_TIMEOUT_MS * results.size(), results);
+				logger.info("KafkaQueueManager successfully initialized");
+			}
+		}
+	}
+	
+	public void stop() {
+		synchronized (startLock) {
+			if (queueConsumerNames != null) {
+				threadManager.shutdownThreads(queueConsumerNames);
+				queueConsumerNames = null;
+			}
+		}
+	}
+
+	@Override
+	public MessagingStatus getMessagingStatus() {
+		KafkaStatus status = new KafkaStatus();
+		KafkaMonitor monitor = new ODFInternalFactory().create(KafkaMonitor.class);
+		status.setBrokers(monitor.getBrokers(zookeeperConnectString));
+
+		List<String> topics = new ArrayList<String>(Arrays.asList(KafkaQueueManager.TOPIC_NAME_ADMIN_QUEUE, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE));
+		for (DiscoveryServiceProperties info : new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties()) {
+			topics.add(KafkaQueueManager.SERVICE_TOPIC_PREFIX + info.getId());
+		}
+
+		List<KafkaTopicStatus> topicStatusList = new ArrayList<KafkaTopicStatus>();
+		for (String topic : topics) {
+			KafkaTopicStatus topicStatus = getTopicStatus(topic, monitor);
+			topicStatusList.add(topicStatus);
+		}
+		status.setTopicStatus(topicStatusList);
+		return status;
+	}
+
+	private KafkaTopicStatus getTopicStatus(String topic, KafkaMonitor monitor) {
+		KafkaTopicStatus topicStatus = new KafkaTopicStatus();
+		topicStatus.setTopic(topic);
+		topicStatus.setBrokerPartitionMessageInfo(monitor.getMessageCountForTopic(zookeeperConnectString, topic));
+
+		List<KafkaGroupOffsetInfo> offsetInfoList = new ArrayList<KafkaGroupOffsetInfo>();
+		List<String> consumerGroupsFromZookeeper = monitor.getConsumerGroups(zookeeperConnectString, topic);
+		for (String group : consumerGroupsFromZookeeper) {
+			KafkaGroupOffsetInfo offsetInfoContainer = new KafkaGroupOffsetInfo();
+			offsetInfoContainer.setGroupId(group);
+			List<PartitionOffsetInfo> offsetsForTopic = monitor.getOffsetsForTopic(zookeeperConnectString, group, topic);
+			for (PartitionOffsetInfo info : offsetsForTopic) {
+				// to reduce clutter, only if at least 1 partition has an offset > -1 (== any offset) for this consumer group, 
+				// it will be included in the result
+				if (info.getOffset() > -1) {
+					offsetInfoContainer.setOffsets(offsetsForTopic);
+					offsetInfoList.add(offsetInfoContainer);
+					break;
+				}
+			}
+		}
+		topicStatus.setConsumerGroupOffsetInfo(offsetInfoList);
+
+		topicStatus.setPartitionBrokersInfo(monitor.getPartitionInfoForTopic(zookeeperConnectString, topic));
+		return topicStatus;
+	}
+
+	private List<ThreadStartupResult> scheduleNotificationListenerThreads() {
+		NotificationManager nm = new ODFInternalFactory().create(NotificationManager.class);
+		List<NotificationListener> listeners = nm.getListeners();
+		List<ThreadStartupResult> result = new ArrayList<>();
+		if (listeners == null) {
+			return result;
+		}
+		final OpenDiscoveryFramework odf = new ODFFactory().create();
+		for (final NotificationListener listener : listeners) {
+			String topicName = listener.getTopicName();
+			String consumerGroupId = "ODFNotificationGroup" + topicName;
+			Properties kafkaConsumerProps = getConsumerConfigProperties(consumerGroupId, true);  
+			String threadName = "NotificationListenerThread" + topicName;
+			if (threadManager.getStateOfUnmanagedThread(threadName) != ThreadStatus.ThreadState.RUNNING) {
+				KafkaQueueConsumer consumer = new KafkaQueueConsumer(topicName, kafkaConsumerProps, new QueueMessageProcessor() {
+					
+					@Override
+					public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+						try {
+							listener.onEvent(msg, odf);
+						} catch(Exception exc) {
+							String errorMsg = MessageFormat.format("Notification listener ''{0}'' has thrown an exception. Ignoring it", listener.getName());
+							logger.log(Level.WARNING, errorMsg, exc);
+						}
+					}
+				});
+				ThreadStartupResult startupResult = threadManager.startUnmanagedThread(threadName, consumer);
+				result.add(startupResult);		
+			} else {
+				result.add(new ThreadStartupResult(threadName) {
+					@Override
+					public boolean isNewThreadCreated() {
+						return false;
+					}
+
+					@Override
+					public boolean isReady() {
+						return true;
+					}
+				});
+			}
+		}
+		return result;
+	}
+	
+}
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java
new file mode 100755
index 0000000..73d98e7
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/KafkaRuntimeConsumer.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.controlcenter.ServiceRuntime;
+
+/**
+ * This consumer is started for a certain runtime and starts a KafkaQueueConsumer
+ * once the runtime becomes available.
+ */
+public class KafkaRuntimeConsumer implements ODFRunnable {
+
+	Logger logger = Logger.getLogger(KafkaRuntimeConsumer.class.getName());
+
+	private ServiceRuntime runtime;
+	private boolean isShutdown = false;
+	private ExecutorService executorService = null;
+	private KafkaQueueConsumer kafkaQueueConsumer = null;
+
+	private String topic;
+	private Properties config;
+	private QueueMessageProcessor processor;
+
+	private KafkaQueueConsumer.ConsumptionCallback callback = new KafkaQueueConsumer.ConsumptionCallback() {
+		@Override
+		public boolean stopConsumption() {
+			return isShutdown || (runtime.getWaitTimeUntilAvailable() > 0);
+		}
+	};
+
+	public KafkaRuntimeConsumer(ServiceRuntime runtime, String topicName, Properties config, QueueMessageProcessor processor) {
+		this.runtime = runtime;
+		this.processor = processor;
+		this.topic = topicName;
+		this.config = config;
+	}
+
+	@Override
+	public void run() {
+		logger.log(Level.INFO, "Starting runtime consumer for topic ''{0}''", topic);
+		while (!isShutdown) {
+			long waitTime = runtime.getWaitTimeUntilAvailable();
+			if (waitTime <= 0) {
+				logger.log(Level.INFO, "Starting Kafka consumer for topic ''{0}''", topic);
+				kafkaQueueConsumer = new KafkaQueueConsumer(topic, config, processor, callback);
+				kafkaQueueConsumer.setExecutorService(executorService);
+				// run consumer synchronously
+				kafkaQueueConsumer.run();
+				logger.log(Level.INFO, "Kafka consumer for topic ''{0}'' is finished", topic);
+
+				// if we are here, this means that the consumer was cancelled
+				// either directly or (more likely) through the Consumption callback 
+				kafkaQueueConsumer = null;
+			} else {
+				try {
+					logger.log(Level.FINER, "Runtime ''{0}'' is not available, waiting for ''{1}''ms", new Object[]{runtime.getName(), waitTime });
+					Thread.sleep(waitTime);
+				} catch (InterruptedException e) {
+					throw new RuntimeException(e);
+				}
+			}
+		}
+		logger.log(Level.INFO, "Kafka runtime consumer for topic ''{0}'' has shut down", topic);
+	}
+
+	@Override
+	public void setExecutorService(ExecutorService executorService) {
+		this.executorService = executorService;
+	}
+
+	@Override
+	public void cancel() {
+		isShutdown = true;
+		if (kafkaQueueConsumer != null) {
+			kafkaQueueConsumer.cancel();
+		}
+	}
+
+	@Override
+	public boolean isReady() {
+		return true;
+	}
+
+}
diff --git a/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java
new file mode 100755
index 0000000..9c08f3a
--- /dev/null
+++ b/odf/odf-messaging/src/main/java/org/apache/atlas/odf/core/messaging/kafka/MessageSearchConsumer.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.messaging.kafka;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.controlcenter.ODFRunnable;
+import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
+
+public class MessageSearchConsumer implements ODFRunnable {
+	private static final long POLLING_DURATION_MS = 100;
+	private static final int MAX_POLL_COUNT = 5;
+
+	private Logger logger = Logger.getLogger(MessageSearchConsumer.class.getName());
+	private SearchCompletedCallback searchCompletedCallback;
+	private List<String> searchStrings;
+	protected String topic;
+	private KafkaConsumer<String, String> kafkaConsumer;
+	private boolean shutdown;
+	private boolean ready = false;
+	private List<PartitionOffsetInfo> maxOffsetsForTopic = new ArrayList<PartitionOffsetInfo>();
+
+
+	public MessageSearchConsumer(String topic, SearchCompletedCallback completitionCallback, List<String> searchStrings) {
+		setTopic(topic);
+		setSearchStrings(searchStrings);
+		setCompletitionCallback(completitionCallback);
+	}
+
+	public MessageSearchConsumer() {
+	}
+
+	protected List<PartitionOffsetInfo> retrieveTopicOffsets() {
+		List<PartitionOffsetInfo> offsetsForTopic = new ArrayList<PartitionOffsetInfo>();
+		String zookeeperConnect = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+
+		if (zookeeperConnect != null) {
+			final KafkaMonitor create = new ODFInternalFactory().create(KafkaMonitor.class);
+			for (int part : create.getPartitionsForTopic(zookeeperConnect, this.topic)) {
+				offsetsForTopic.add(create.getOffsetsOfLastMessagesForTopic(zookeeperConnect, this.topic, part));
+			}
+		}
+		return offsetsForTopic;
+	}
+
+	public void setTopic(String topic) {
+		this.topic = topic;
+	}
+
+	public void setSearchStrings(List<String> searchStrings) {
+		this.searchStrings = searchStrings;
+	}
+
+	public void setCompletitionCallback(SearchCompletedCallback completitionCallback) {
+		this.searchCompletedCallback = completitionCallback;
+	}
+
+	protected Properties getKafkaConsumerProperties() {
+		Properties consumerProperties = new ODFFactory().create().getSettingsManager().getKafkaConsumerProperties();
+		consumerProperties.put("group.id", UUID.randomUUID().toString() + "_searchConsumer");
+		final String zookeeperConnect = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+		consumerProperties.put("zookeeper.connect", zookeeperConnect);
+		consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+		consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnect).iterator();
+		StringBuilder brokersString = new StringBuilder();
+		while (brokers.hasNext()) {
+			brokersString.append(brokers.next());
+			if (brokers.hasNext()) {
+				brokersString.append(",");
+			}
+		}
+		consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+		consumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+		return consumerProperties;
+	}
+
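+	/**
+	 * Scan the topic from the beginning (using a fresh random group.id) and report
+	 * the partition/offset of every message whose key or value matches one of the
+	 * search strings. The search completes once the last known offset of every
+	 * partition has been reached, or after at most MAX_POLL_COUNT polls.
+	 */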
+	@Override
+	public void run() {
+		this.maxOffsetsForTopic = retrieveTopicOffsets();
+		final String logPrefix = "Consumer for topic " + topic + ": ";
+		try {
+
+			Map<Integer, Boolean> maxOffsetReachedMap = new HashMap<Integer, Boolean>();
+			if (maxOffsetsForTopic.isEmpty()) {
+				logger.info("No offsets found for topic " + this.topic + ", therefore no matching messages can be found");
+				if (searchCompletedCallback != null) {
+					searchCompletedCallback.onDoneSearching(new HashMap<String, PartitionOffsetInfo>());
+					return;
+				}
+			}
+			for (PartitionOffsetInfo info : maxOffsetsForTopic) {
+				//if the max offset is -1, no message exists on the partition
+				if (info.getOffset() > -1) {
+					maxOffsetReachedMap.put(info.getPartitionId(), false);
+				}
+			}
+
+			Map<String, PartitionOffsetInfo> resultMap = new HashMap<String, PartitionOffsetInfo>();
+
+			Properties consumerProperties = getKafkaConsumerProperties();
+
+			if (this.kafkaConsumer == null) {
+				logger.fine(logPrefix + " create new consumer for topic " + topic);
+				try {
+					this.kafkaConsumer = new KafkaConsumer<String, String>(consumerProperties);
+					// each search consumer uses its own random group.id, so subscribe() assigns it all partitions of the topic
+					kafkaConsumer.subscribe(Arrays.asList(topic));
+				} catch (ZkTimeoutException zkte) {
+					String zkHosts = consumerProperties.getProperty("zookeeper.connect");
+					logger.log(Level.SEVERE, logPrefix + " Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zkHosts);
+					throw zkte;
+				}
+			}
+			logger.log(Level.INFO, logPrefix + " Consumer " + "''{1}'' is now listening on ODF queue ''{0}'' with configuration {2}",
+					new Object[] { topic, kafkaConsumer, consumerProperties });
+
+			int pollCount = 0;
+			while (!Thread.interrupted() && pollCount < MAX_POLL_COUNT && !shutdown && kafkaConsumer != null) {
+				logger.info("searching ...");
+				pollCount++;
+				ConsumerRecords<String, String> records = kafkaConsumer.poll(POLLING_DURATION_MS);
+				ready = true;
+				final Iterator<ConsumerRecord<String, String>> polledRecords = records.records(topic).iterator();
+				
+				while (polledRecords.hasNext() && !shutdown) {
+					final ConsumerRecord<String, String> next = polledRecords.next();
+					for (String s : searchStrings) {
+						if ((next.key() != null && next.key().equals(s)) || (next.value() != null && next.value().contains(s))) {
+							final PartitionOffsetInfo position = new PartitionOffsetInfo();
+							position.setOffset(next.offset());
+							position.setPartitionId(next.partition());
+							resultMap.put(s, position);
+						}
+					}
+
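+					// note: maxOffsetsForTopic is indexed by list position; this assumes partitions are numbered 0..n-1 in order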
+					if (next.offset() == maxOffsetsForTopic.get(next.partition()).getOffset()) {
+						maxOffsetReachedMap.put(next.partition(), true);
+					}
+
+					boolean allCompleted = true;
+					for (Entry<Integer, Boolean> entry : maxOffsetReachedMap.entrySet()) {
+						if (!entry.getValue()) {
+							allCompleted = false;
+							break;
+						}
+					}
+
+					if (allCompleted) {
+						logger.info("Done searching all messages");
+						if (searchCompletedCallback != null) {
+							searchCompletedCallback.onDoneSearching(resultMap);
+							return;
+						}
+						shutdown = true;
+					}
+				}
+			}
+		} catch (Exception exc) {
+			String msg = MessageFormat.format(" Caught exception on queue ''{0}''", topic);
+			logger.log(Level.WARNING, logPrefix + msg, exc);
+		} finally {
+			if (kafkaConsumer != null) {
+				logger.log(Level.FINE, logPrefix + "Closing consumer " + " on topic ''{0}''", topic);
+				kafkaConsumer.close();
+				logger.log(Level.FINE, logPrefix + "Closed consumer " + " on topic ''{0}''", topic);
+				kafkaConsumer = null;
+			}
+		}
+		logger.log(Level.FINE, logPrefix + "Finished consumer on topic ''{0}''", topic);
+	}
+
+	@Override
+	public void setExecutorService(ExecutorService service) {
+
+	}
+
+	@Override
+	public void cancel() {
+		this.shutdown = true;
+	}
+
+	@Override
+	public boolean isReady() {
+		return ready;
+	}
+
+	public interface SearchCompletedCallback {
+		void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap);
+	}
+}
diff --git a/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..1c96170
--- /dev/null
+++ b/odf/odf-messaging/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,14 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager=org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java
new file mode 100755
index 0000000..396193f
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueConsumerExceptionTest.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.engine.ThreadStatus.ThreadState;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+
+import kafka.admin.AdminUtils;
+import kafka.common.TopicExistsException;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+
+public class KafkaQueueConsumerExceptionTest extends ODFTestcase {
+	static Logger logger = ODFTestLogger.get();
+	static final String topicName = "my_dummy_test_topic";
+	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+
+	@BeforeClass
+	public static void setupTopic() {
+		ZkClient zkClient = null;
+		try {
+			zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
+			logger.log(Level.FINEST, "Checking if topic ''{0}'' already exists", topicName);
+			// using partition size 1 and replication size 1, no special
+			// per-topic config needed
+			logger.log(Level.FINE, "Topic ''{0}'' does not exist, creating it", topicName);
+			//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
+			AdminUtils.createTopic(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false), topicName, 1, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
+			logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
+		} catch (TopicExistsException ex) {
+			logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
+		} catch (ZkTimeoutException zkte) {
+			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zookeeperHost);
+		} finally {
+			if (zkClient != null) {
+				zkClient.close();
+			}
+		}
+	}
+
+	@Test
+	public void testExceptionAndRetryDuringProcessing() throws InterruptedException, ExecutionException, TimeoutException {
+		final ODFInternalFactory odfFactory = new ODFInternalFactory();
+		final String groupId = "retrying-exception-dummy-consumer";
+		Properties kafkaConsumerProperties = new KafkaQueueManager().getConsumerConfigProperties(groupId, true);
+		kafkaConsumerProperties.put("group.id", groupId);
+		final List<String> consumedMsgs1 = new ArrayList<String>();
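+		// Message processor that always throws, so that KafkaQueueConsumer retries each message up to MAX_PROCESSING_EXCEPTIONS times.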
+		KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, new QueueMessageProcessor() {
+
+			@Override
+			public void process(ExecutorService executorService, String msg, int partition, long offset) {
+				consumedMsgs1.add(msg);
+				logger.info("retry_consumer process " + msg + " throw exception and try again");
+				throw new RuntimeException("Oops!");
+			}
+		});
+
+		final ThreadManager threadManager = odfFactory.create(ThreadManager.class);
+		final String consumerThread = "TEST_CONSUMER_RETRY_RUNNING";
+		threadManager.waitForThreadsToBeReady(10000, Arrays.asList(threadManager.startUnmanagedThread(consumerThread, cnsmr)));
+
+		sendMsg("TEST_MSG");
+		sendMsg("TEST_MSG2");
+
+		Thread.sleep(2000);
+
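+		// Each of the two messages should have been attempted MAX_PROCESSING_EXCEPTIONS times before being given up on, hence the expected count.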
+		Assert.assertEquals(2 * KafkaQueueConsumer.MAX_PROCESSING_EXCEPTIONS, consumedMsgs1.size());
+
+		final ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(consumerThread);
+		Assert.assertEquals(ThreadState.RUNNING, stateOfUnmanagedThread);
+	}
+
+	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
+		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+
+		Properties props = odfConfig.getKafkaProducerProperties();
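+		// Build the bootstrap.servers list from the brokers currently registered in Zookeeper.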
+		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
+		StringBuilder brokersString = new StringBuilder();
+		while (brokers.hasNext()) {
+			brokersString.append(brokers.next());
+			if (brokers.hasNext()) {
+				brokersString.append(",");
+			}
+		}
+		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+
+		final KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
+		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
+		producer.send(producerRecord).get(3000, TimeUnit.MILLISECONDS);
+		producer.close();
+	}
+
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java
new file mode 100755
index 0000000..cff538c
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/KafkaQueueManagerTest.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.settings.MessagingConfiguration;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.Callback;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.wink.json4j.JSONException;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackerStatus.STATUS;
+import org.apache.atlas.odf.api.discoveryservice.AnalysisRequestTracker;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.engine.ThreadStatus.ThreadState;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.AnalysisRequestTrackerStore;
+import org.apache.atlas.odf.core.controlcenter.DefaultStatusQueueStore;
+import org.apache.atlas.odf.core.controlcenter.DefaultThreadManager;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.controlcenter.StatusQueueEntry;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
+import org.apache.atlas.odf.core.controlcenter.TrackerUtil;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaProducerManager;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class KafkaQueueManagerTest extends ODFTestBase {
+
+	private static Long origRetention;
+	Logger logger = ODFTestLogger.get();
+	String zookeeperConnectString = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+
+	@BeforeClass
+	public static void setupTrackerRetention() throws ValidationException {
+		SettingsManager settingsManager = new ODFFactory().create().getSettingsManager();
+		// Set the retention high enough that trackers are kept for the duration of the tests
+		final MessagingConfiguration messagingConfiguration = settingsManager.getODFSettings().getMessagingConfiguration();
+		origRetention = messagingConfiguration.getAnalysisRequestRetentionMs();
+		messagingConfiguration.setAnalysisRequestRetentionMs(120000000L);
+
+		ODFTestLogger.get().info("Set request retention to " + settingsManager.getODFSettings().getMessagingConfiguration().getAnalysisRequestRetentionMs());
+	}
+
+	@AfterClass
+	public static void cleanupTrackerRetention() throws ValidationException {
+		SettingsManager settingsManager = new ODFFactory().create().getSettingsManager();
+		ODFSettings settings = settingsManager.getODFSettings();
+		settings.getMessagingConfiguration().setAnalysisRequestRetentionMs(origRetention);
+		settingsManager.updateODFSettings(settings);
+	}
+
+	@Test
+	public void testStatusQueue() throws Exception {
+		KafkaQueueManager kqm = new KafkaQueueManager();
+
+		logger.info("Queue manager created");
+		AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json", null);
+
+		long before = System.currentTimeMillis();
+		tracker.setLastModified(before);
+		int maxEntries = 10;
+		for (int i = 0; i < maxEntries; i++) {
+			tracker.getRequest().setId("id" + i);
+			StatusQueueEntry sqe = new StatusQueueEntry();
+			sqe.setAnalysisRequestTracker(tracker);
+			kqm.enqueueInStatusQueue(sqe);
+		}
+		long after = System.currentTimeMillis();
+		logger.info("Time for enqueueing " + maxEntries + " objects: " + (after - before) + ", " + ((after - before) / maxEntries) + "ms per object");
+		Thread.sleep(100 * maxEntries);
+
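+		// The status queue consumer should have processed all entries by now; verify each tracker can be read back and is marked FINISHED.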
+		AnalysisRequestTrackerStore store = new DefaultStatusQueueStore();
+
+		for (int i = 0; i < maxEntries; i++) {
+			logger.info("Querying status " + i);
+			AnalysisRequestTracker queriedTracker = store.query("id" + i);
+			Assert.assertNotNull(queriedTracker);
+			Assert.assertEquals(STATUS.FINISHED, queriedTracker.getStatus());
+		}
+
+		logger.info("Test testEnqueueStatusQueue finished");
+	}
+
+	/**
+	 * This test creates a tracker, puts it on the status queue, kills the service consumer, and creates a new dummy consumer to move the offset of the service consumer behind the new tracker.
+	 * The status consumer is then shut down and its offset is reset so that it consumes from the beginning again, thereby cleaning up stuck requests.
+	 * Finally, the Kafka queue manager is re-initialized, causing all consumers to come up and triggering the cleanup process.
+	 */
+	@Test
+	@Ignore("Adjust once ServiceRuntimes are fully implemented")
+	public void testStuckRequestCleanup() throws JSONException, InterruptedException, ExecutionException, TimeoutException {
+		final AnalysisRequestTracker tracker = JSONUtils.readJSONObjectFromFileInClasspath(AnalysisRequestTracker.class, "org/apache/atlas/odf/core/test/messaging/kafka/tracker1.json",
+				null);
+		tracker.setStatus(STATUS.IN_DISCOVERY_SERVICE_QUEUE);
+		tracker.setNextDiscoveryServiceRequest(0);
+		tracker.setLastModified(System.currentTimeMillis());
+		final String newTrackerId = "KAFKA_QUEUE_MANAGER_09_TEST" + UUID.randomUUID().toString();
+		tracker.getRequest().setId(newTrackerId);
+		DiscoveryServiceRequest dsRequest = TrackerUtil.getCurrentDiscoveryServiceStartRequest(tracker);
+		final DiscoveryServiceProperties discoveryServiceRegistrationInfo = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServicesProperties()
+				.get(0);
+		dsRequest.setDiscoveryServiceId(discoveryServiceRegistrationInfo.getId());
+		String dsID = dsRequest.getDiscoveryServiceId();
+		String topicName = KafkaQueueManager.SERVICE_TOPIC_PREFIX + dsID;
+		//Add the tracker to the queue and set the consumer offset behind the request so that it will be cleaned up
+
+		String consumerGroupId = "odf-topic-" + dsID + "_group";
+		String threadName = "Dummy_DiscoveryServiceQueueConsumer" + topicName;
+
+		final List<Throwable> multiThreadErrors = new ArrayList<Throwable>();
+		final DefaultThreadManager tm = new DefaultThreadManager();
+		logger.info("shutdown old test 09 consumer and replace with fake doing nothing");
+		for (int no = 0; no < discoveryServiceRegistrationInfo.getParallelismCount(); no++) {
+			tm.shutdownThreads(Collections.singletonList("DiscoveryServiceQueueConsumer" + topicName + "_" + no));
+		}
+		Properties kafkaConsumerProps = getKafkaConsumerConfigProperties(consumerGroupId);
+
+		final long[] producedMsgOffset = new long[1];
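+		// Single-element array so the producer callback below can hand the message offset back despite the effectively-final rule for anonymous classes.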
+
+		final CountDownLatch msgProcessingLatch = new CountDownLatch(1);
+		ThreadStartupResult created = tm.startUnmanagedThread(threadName, new KafkaQueueConsumer(topicName, kafkaConsumerProps, new QueueMessageProcessor() {
+
+			@Override
+			public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+				logger.info("Dequeue without processing " + msgOffset);
+				if (msgOffset == producedMsgOffset[0]) {
+					try {
+						msgProcessingLatch.countDown();
+					} catch (Exception e) {
+						msgProcessingLatch.countDown();
+						multiThreadErrors.add(e);
+					}
+				}
+			}
+
+		}));
+
+		tm.waitForThreadsToBeReady(30000, Arrays.asList(created));
+
+		String key = tracker.getRequest().getId();
+		String value = JSONUtils.toJSON(tracker);
+
+		new DefaultStatusQueueStore().store(tracker);
+
+		KafkaMonitor kafkaMonitor = new ODFInternalFactory().create(KafkaMonitor.class);
+		List<String> origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
+		logger.info("Found status consumers: " + origQueueConsumers.toString() + ", shutting down StatusWatcher");
+
+		//kill status queue watcher so that it is restarted when queue manager is initialized and detects stuck requests
+		tm.shutdownThreads(Collections.singletonList("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0"));
+
+		int maxWaitForConsumerDeath = 60;
+		// wait until the status watcher thread is gone or finished, or until the retry budget is used up
+		while (tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0") != ThreadState.NON_EXISTENT
+				&& tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0") != ThreadState.FINISHED && maxWaitForConsumerDeath > 0) {
+			maxWaitForConsumerDeath--;
+			Thread.sleep(500);
+		}
+
+		logger.info("Only 1 consumer left? " + maxWaitForConsumerDeath + " : " + tm.getStateOfUnmanagedThread("StatusWatcher" + KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE + "_0"));
+		logger.info(" set offset for status consumer to beginning so that it consumes from when restarting");
+		final int offset = 1000000;
+		for (String statusConsumerGroup : origQueueConsumers) {
+			if (statusConsumerGroup.contains("DSStatusWatcherConsumerGroup")) {
+				boolean success = false;
+				int retryCount = 0;
+				final int maxOffsetRetry = 20;
+				while (!success && retryCount < maxOffsetRetry) {
+					success = kafkaMonitor.setOffset(zookeeperConnectString, statusConsumerGroup, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE, 0, offset);
+					retryCount++;
+					Thread.sleep(500);
+				}
+
+				Assert.assertNotEquals(retryCount, maxOffsetRetry);
+				Assert.assertTrue(success);
+			}
+		}
+
+		new ODFInternalFactory().create(KafkaProducerManager.class).sendMsg(topicName, key, value, new Callback() {
+
+			@Override
+			public void onCompletion(RecordMetadata metadata, Exception exception) {
+				producedMsgOffset[0] = metadata.offset();
+			}
+		});
+
+		final boolean await = msgProcessingLatch.await(240, TimeUnit.SECONDS);
+		Assert.assertTrue(await);
+		if (await) {
+			logger.info("run after message consumption...");
+			AnalysisRequestTrackerStore store = new ODFInternalFactory().create(AnalysisRequestTrackerStore.class);
+			AnalysisRequestTracker storeTracker = store.query(tracker.getRequest().getId());
+			Assert.assertEquals(tracker.getRequest().getId(), storeTracker.getRequest().getId());
+			Assert.assertEquals(STATUS.IN_DISCOVERY_SERVICE_QUEUE, storeTracker.getStatus());
+
+			//start odf and cleanup here...
+			logger.info("shutdown all threads and restart ODF");
+			tm.shutdownAllUnmanagedThreads();
+
+			int threadKillRetry = 0;
+			while (tm.getNumberOfRunningThreads() > 0 && threadKillRetry < 20) {
+				Thread.sleep(500);
+				threadKillRetry++;
+			}
+
+			logger.info("All threads down, restart ODF " + threadKillRetry);
+
+			// Initialize analysis manager
+			new ODFFactory().create().getAnalysisManager();
+
+			kafkaMonitor = new ODFInternalFactory().create(KafkaMonitor.class);
+			origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
+			int healthRetrieveRetry = 0;
+			//wait up to 120 seconds (240 * 500 ms) for the status consumer to come up. Once it is up, ODF has restarted successfully and we can continue
+			while (origQueueConsumers.isEmpty() && healthRetrieveRetry < 240) {
+				healthRetrieveRetry++;
+				Thread.sleep(500);
+				origQueueConsumers = kafkaMonitor.getConsumerGroups(zookeeperConnectString, KafkaQueueManager.TOPIC_NAME_STATUS_QUEUE);
+			}
+			Assert.assertNotEquals(healthRetrieveRetry, 240);
+
+			logger.info("initialized, wait for cleanup ... " + healthRetrieveRetry);
+			Thread.sleep(5000);
+			logger.info("Found health consumers: " + origQueueConsumers.toString());
+			logger.info("hopefully cleaned up ...");
+			AnalysisRequestTracker storedTracker = store.query(tracker.getRequest().getId());
+			Assert.assertEquals(STATUS.ERROR, storedTracker.getStatus());
+			logger.info("DONE CLEANING UP, ALL FINE");
+		}
+
+		Assert.assertEquals(0, multiThreadErrors.size());
+	}
+
+	public Properties getKafkaConsumerConfigProperties(String consumerGroupID) {
+		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+		Properties kafkaConsumerProps = odfConfig.getKafkaConsumerProperties();
+		kafkaConsumerProps.put("group.id", consumerGroupID);
+		if (zookeeperConnectString != null) {
+			kafkaConsumerProps.put("zookeeper.connect", zookeeperConnectString);
+		}
+
+		kafkaConsumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+		kafkaConsumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		kafkaConsumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		StringBuilder bld = new StringBuilder();
+		final Iterator<String> iterator = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperConnectString).iterator();
+		while (iterator.hasNext()) {
+			bld.append(iterator.next());
+			if (iterator.hasNext()) {
+				bld.append(",");
+			}
+		}
+		kafkaConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bld.toString());
+		kafkaConsumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+
+		return kafkaConsumerProps;
+	}
+
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java
new file mode 100755
index 0000000..35b09e2
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MessageSearchConsumerTest.java
@@ -0,0 +1,193 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.apache.atlas.odf.core.messaging.kafka.MessageSearchConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.api.engine.PartitionOffsetInfo;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.test.ODFTestBase;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+
+import kafka.admin.AdminUtils;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+
+public class MessageSearchConsumerTest extends ODFTestBase {
+	private static final String TEST_SEARCH_STRING = "TEST_STRING_" + UUID.randomUUID().toString();
+	private static final String TEST_SEARCH_FAILURE_STRING = "TEST_FAILURE_STRING";
+	static Logger logger = ODFTestLogger.get();
+	final static String topicName = "MessageSearchConsumerTest" + UUID.randomUUID().toString();
+	private static final int PERFORMANCE_MSG_COUNT = 1000;
+	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+	private KafkaProducer<String, String> producer;
+
+	@BeforeClass
+	public static void createTopic() {
+		ZkClient zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
+		ZkUtils utils = new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false);
+		if (!AdminUtils.topicExists(utils, topicName)) {
+			AdminUtils.createTopic(utils, topicName, 2, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
+		}
+	}
+
+	@Test
+	public void testMsgSearchPerformance() throws InterruptedException, ExecutionException, TimeoutException {
+		logger.info("Producing msgs");
+		for (int no = 0; no < PERFORMANCE_MSG_COUNT; no++) {
+			sendMsg("DUMMY_MSG" + no);
+		}
+		sendMsg(TEST_SEARCH_STRING);
+		logger.info("Done producing ...");
+		Thread.sleep(200);
+
+		final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
+		final CountDownLatch searchLatch = new CountDownLatch(1);
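+		// Start a search consumer that scans the topic for the marker string and releases the latch from its completion callback.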
+		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
+
+			@Override
+			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
+				logger.info("Done searching " + msgPositionMap.get(TEST_SEARCH_STRING).getOffset());
+				Assert.assertTrue(msgPositionMap.get(TEST_SEARCH_STRING).getOffset() > -1);
+				searchLatch.countDown();
+			}
+		}, Arrays.asList(TEST_SEARCH_STRING)));
+
+		boolean await = searchLatch.await(5, TimeUnit.SECONDS);
+		if (await) {
+			logger.info("Messages searched in time");
+		} else {
+			logger.warning("Couldnt finish search in time");
+		}
+
+		final CountDownLatch failureSearchLatch = new CountDownLatch(1);
+		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
+
+			@Override
+			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
+				logger.info("Done searching " + msgPositionMap.toString());
+				Assert.assertFalse(msgPositionMap.containsKey(TEST_SEARCH_FAILURE_STRING));
+				failureSearchLatch.countDown();
+			}
+		}, Arrays.asList(TEST_SEARCH_FAILURE_STRING)));
+
+		await = failureSearchLatch.await(5, TimeUnit.SECONDS);
+		if (await) {
+			logger.info("Messages searched in time");
+		} else {
+			logger.warning("Couldnt finish search in time");
+		}
+	}
+
+	@Test
+	public void testMsgSearchSuccessAndFailure() throws InterruptedException, ExecutionException, TimeoutException {
+		sendMsg(TEST_SEARCH_STRING);
+
+		Thread.sleep(200);
+
+		final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
+		final CountDownLatch searchLatch = new CountDownLatch(1);
+		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
+
+			@Override
+			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
+				logger.info("Done searching " + msgPositionMap.get(TEST_SEARCH_STRING).getOffset());
+				Assert.assertTrue(msgPositionMap.get(TEST_SEARCH_STRING).getOffset() > -1);
+				searchLatch.countDown();
+			}
+		}, Arrays.asList(TEST_SEARCH_STRING)));
+
+		boolean await = searchLatch.await(5, TimeUnit.SECONDS);
+		if (await) {
+			logger.info("Messages searched in time");
+		} else {
+			logger.warning("Couldnt finish search in time");
+		}
+
+		final CountDownLatch failureSearchLatch = new CountDownLatch(1);
+		threadManager.startUnmanagedThread(UUID.randomUUID().toString() + "_searchThread", new MessageSearchConsumer(topicName, new MessageSearchConsumer.SearchCompletedCallback() {
+
+			@Override
+			public void onDoneSearching(Map<String, PartitionOffsetInfo> msgPositionMap) {
+				logger.info("Done searching " + msgPositionMap);
+				Assert.assertFalse(msgPositionMap.containsKey(TEST_SEARCH_FAILURE_STRING));
+				failureSearchLatch.countDown();
+			}
+		}, Arrays.asList(TEST_SEARCH_FAILURE_STRING)));
+
+		await = failureSearchLatch.await(5, TimeUnit.SECONDS);
+		if (await) {
+			logger.info("Messages searched in time");
+		} else {
+			logger.warning("Couldnt finish search in time");
+		}
+	}
+
+	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
+		final KafkaProducer<String, String> producer = getProducer();
+		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
+		producer.send(producerRecord).get(15000, TimeUnit.MILLISECONDS);
+	}
+
+	private KafkaProducer<String, String> getProducer() {
+		if (this.producer == null) {
+			SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+			Properties props = odfConfig.getKafkaProducerProperties();
+			final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
+			StringBuilder brokersString = new StringBuilder();
+			while (brokers.hasNext()) {
+				brokersString.append(brokers.next());
+				if (brokers.hasNext()) {
+					brokersString.append(",");
+				}
+			}
+			props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+			producer = new KafkaProducer<String, String>(props);
+		}
+		return producer;
+	}
+
+	@After
+	public void closeProducer() {
+		// Close only if a producer was actually created; calling getProducer() here would lazily create one just to close it.
+		if (producer != null) {
+			producer.close();
+			producer = null;
+		}
+	}
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java
new file mode 100755
index 0000000..f97dd4e
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/MultiPartitionConsumerTest.java
@@ -0,0 +1,314 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.atlas.odf.api.engine.ThreadStatus;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInitializer;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.controlcenter.QueueMessageProcessor;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaMonitor;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueConsumer;
+import org.apache.atlas.odf.core.messaging.kafka.KafkaQueueManager;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager;
+import org.apache.atlas.odf.core.controlcenter.ThreadManager.ThreadStartupResult;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+
+import kafka.admin.AdminUtils;
+import kafka.common.TopicExistsException;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+
+public class MultiPartitionConsumerTest extends ODFTestcase {
+	static Logger logger = ODFTestLogger.get();
+	final static String topicName = "my_dummy_test_topic" + UUID.randomUUID().toString();
+	static String zookeeperHost = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+	static final int PARTITION_COUNT = 3;
+	private static final int MSG_PER_PARTITION = 5;
+	private final ThreadManager threadManager = new ODFInternalFactory().create(ThreadManager.class);
+
+	@BeforeClass
+	public static void setupTopic() {
+		ZkClient zkClient = null;
+		try {
+			zkClient = new ZkClient(zookeeperHost, 5000, 5000, ZKStringSerializer$.MODULE$);
+			// using PARTITION_COUNT partitions and replication factor 1, no special
+			// per-topic config needed
+			logger.log(Level.FINE, "Creating topic ''{0}'' unless it already exists", topicName);
+			//FIXME zkUtils isSecure parameter? Only with SSL! --> parse zkhosts?
+			AdminUtils.createTopic(new ZkUtils(zkClient, new ZkConnection(zookeeperHost), false), topicName, PARTITION_COUNT, 1, new Properties(), KafkaQueueManager.DEFAULT_RACK_AWARE_MODE);
+			logger.log(Level.FINE, "Topic ''{0}'' created", topicName);
+		} catch (TopicExistsException ex) {
+			logger.log(Level.FINE, "Topic ''{0}'' already exists.", topicName);
+		} catch (ZkTimeoutException zkte) {
+			logger.log(Level.SEVERE, "Could not connect to the Zookeeper instance at ''{0}''. Please ensure that Zookeeper is running", zookeeperHost);
+		} finally {
+			if (zkClient != null) {
+				zkClient.close();
+			}
+		}
+	}
+
+	@After
+	public void cleanupConsumers() {
+		logger.info("Cleaning up consumers...");
+		logger.info("----------------------------------  Stopping ODF...");
+		ODFInitializer.stop();
+		logger.info("----------------------------------  Starting ODF...");
+		ODFInitializer.start();
+		logger.info("----------------------------------  ODF started.");
+	}
+
+	@Test
+	public void testMultiPartitionDelayedConsumption() throws InterruptedException, ExecutionException {
+		Properties kafkaConsumerProperties = getConsumerProps();
+		final List<String> consumedMsgs = new ArrayList<String>();
+		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
+
+		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
+		final int processingDelay = 2000;
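+		// Start one consumer per partition (all in the same consumer group) and let each processor sleep so that consumption of the partitions overlaps.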
+		for (int no = 0; no < PARTITION_COUNT; no++) {
+			final int currentThread = no;
+			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
+
+				@Override
+				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+					try {
+						Thread.sleep(processingDelay);
+					} catch (InterruptedException e) {
+						// Restore the interrupt flag so the consumer thread can shut down cleanly.
+						Thread.currentThread().interrupt();
+					}
+					consumedMsgs.add(msg);
+					logger.info("process " + msg + " in thread " + currentThread);
+				}
+			};
+
+			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
+
+			final String consumerThread = threadPrefix + no;
+			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
+			startupList.add(startUnmanagedThread);
+		}
+		try {
+			threadManager.waitForThreadsToBeReady(30000, startupList);
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
+					sendMsg("Partition " + no + " msg " + msgNo);
+				}
+			}
+
+			int totalWait = 0;
+			while (totalWait < PARTITION_COUNT * MSG_PER_PARTITION * processingDelay + 10000 && consumedMsgs.size() < PARTITION_COUNT * MSG_PER_PARTITION) {
+				Thread.sleep(2000);
+				totalWait += 2000;
+			}
+
+			logger.info("Done with all messages after " + totalWait);
+
+			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
+
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
+				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
+			}
+		} catch (TimeoutException e) {
+			Assert.fail("Consumer could not be started on time");
+		}
+	}
+
+	@Test
+	public void testMultiPartitionConsumption() throws InterruptedException, ExecutionException {
+		Properties kafkaConsumerProperties = getConsumerProps();
+		final List<String> consumedMsgs = new ArrayList<String>();
+		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
+
+		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
+		for (int no = 0; no < PARTITION_COUNT; no++) {
+			final int currentThread = no;
+			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
+
+				@Override
+				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+					consumedMsgs.add(msg);
+					logger.info("process " + msg + " in thread " + currentThread);
+				}
+			};
+
+			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
+
+			final String consumerThread = threadPrefix + no;
+			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
+			startupList.add(startUnmanagedThread);
+		}
+		try {
+			threadManager.waitForThreadsToBeReady(30000, startupList);
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
+					sendMsg("Partition " + no + " msg " + msgNo);
+				}
+			}
+
+			int totalWait = 0;
+			boolean done = false;
+			while (totalWait < 30 && !done) {
+				if (consumedMsgs.size() == PARTITION_COUNT * MSG_PER_PARTITION) {
+					done = true;
+				}
+				totalWait++;
+				Thread.sleep(500);
+			}
+
+			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
+
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
+				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
+			}
+		} catch (TimeoutException e) {
+			Assert.fail("Consumer could not be started on time");
+		}
+	}
+
+	@Test
+	public void testMultiPartitionExceptionAndRetryDuringProcessing() throws InterruptedException, ExecutionException {
+		Properties kafkaConsumerProperties = getConsumerProps();
+		final List<String> consumedMsgs = new ArrayList<String>();
+		List<ThreadStartupResult> startupList = new ArrayList<ThreadStartupResult>();
+
+		final String threadPrefix = "TEST_CONSUMER_RETRY_RUNNING_";
+		for (int no = 0; no < PARTITION_COUNT; no++) {
+			final int currentThread = no;
+			final QueueMessageProcessor requestConsumer = new QueueMessageProcessor() {
+
+				private int excCount = 0;
+
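+				// Fail MAX_PROCESSING_EXCEPTIONS - 1 times, then succeed: every message is eventually processed while staying within the retry budget.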
+				@Override
+				public void process(ExecutorService executorService, String msg, int partition, long msgOffset) {
+					if (excCount < KafkaQueueConsumer.MAX_PROCESSING_EXCEPTIONS - 1) {
+						excCount++;
+						logger.info("Throw exception " + excCount + " on consumer " + currentThread);
+						throw new RuntimeException("Forced error on consumer");
+					}
+					consumedMsgs.add(msg);
+					logger.info("process " + msg + " in thread " + currentThread);
+				}
+			};
+
+			KafkaQueueConsumer cnsmr = new KafkaQueueConsumer(topicName, kafkaConsumerProperties, requestConsumer);
+
+			final String consumerThread = threadPrefix + no;
+			final ThreadStartupResult startUnmanagedThread = threadManager.startUnmanagedThread(consumerThread, cnsmr);
+			startupList.add(startUnmanagedThread);
+		}
+		try {
+			threadManager.waitForThreadsToBeReady(30000, startupList);
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				for (int msgNo = 0; msgNo < MSG_PER_PARTITION; msgNo++) {
+					sendMsg("Partition " + no + " msg " + msgNo);
+				}
+			}
+
+			int totalWait = 0;
+			boolean done = false;
+			while (totalWait < 30 && !done) {
+				if (consumedMsgs.size() == PARTITION_COUNT * MSG_PER_PARTITION) {
+					done = true;
+				}
+				totalWait++;
+				Thread.sleep(500);
+			}
+			Assert.assertEquals(PARTITION_COUNT * MSG_PER_PARTITION, consumedMsgs.size());
+
+			for (int no = 0; no < PARTITION_COUNT; no++) {
+				final ThreadStatus.ThreadState stateOfUnmanagedThread = threadManager.getStateOfUnmanagedThread(threadPrefix + no);
+				Assert.assertEquals(ThreadStatus.ThreadState.RUNNING, stateOfUnmanagedThread);
+			}
+		} catch (TimeoutException e) {
+			Assert.fail("Consumer could not be started on time");
+		}
+	}
+
+	private Properties getConsumerProps() {
+		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+		Properties kafkaConsumerProperties = odfConfig.getKafkaConsumerProperties();
+		final String groupId = "retrying-dummy-consumer";
+		kafkaConsumerProperties.put("group.id", groupId);
+		kafkaConsumerProperties.put("zookeeper.connect", zookeeperHost);
+		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
+		StringBuilder brokersString = new StringBuilder();
+		while (brokers.hasNext()) {
+			brokersString.append(brokers.next());
+			if (brokers.hasNext()) {
+				brokersString.append(",");
+			}
+		}
+		kafkaConsumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+		kafkaConsumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+		kafkaConsumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+		kafkaConsumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
+
+		return kafkaConsumerProperties;
+	}
+
+	void sendMsg(String msg) throws InterruptedException, ExecutionException, TimeoutException {
+		SettingsManager odfConfig = new ODFFactory().create().getSettingsManager();
+		Properties props = odfConfig.getKafkaProducerProperties();
+		final Iterator<String> brokers = new ODFInternalFactory().create(KafkaMonitor.class).getBrokers(zookeeperHost).iterator();
+		StringBuilder brokersString = new StringBuilder();
+		while (brokers.hasNext()) {
+			brokersString.append(brokers.next());
+			if (brokers.hasNext()) {
+				brokersString.append(",");
+			}
+		}
+		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokersString.toString());
+		//Should we use a custom partitioner? we could try to involve consumer offsets and always put on "emptiest" partition
+		//props.put("partitioner.class", TestMessagePartitioner.class);
+
+		final KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
+		ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topicName, UUID.randomUUID().toString(), msg);
+		producer.send(producerRecord).get(3000, TimeUnit.MILLISECONDS);
+		producer.close();
+	}
+
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java
new file mode 100755
index 0000000..d1c9810
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceErrorTest.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
+
+public class ParallelServiceErrorTest extends ODFTestcase {
+	private static final int NUMBER_OF_QUEUED_REQUESTS = 1;
+	Logger log = ODFTestLogger.get();
+
+	@Test
+	public void runDataSetsInParallelError() throws Exception {
+		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "errorID2" }), AnalysisRequestStatus.State.FINISHED, AnalysisRequestStatus.State.ERROR);
+	}
+
+	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, AnalysisRequestStatus.State... expectedState) throws Exception {
+		log.info("Running data sets in parallel: " + dataSetIDs);
+		log.info("Expected state: " + expectedState);
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+
+		List<AnalysisRequest> requests = new ArrayList<AnalysisRequest>();
+		List<AnalysisResponse> responses = new ArrayList<AnalysisResponse>();
+		List<String> idList = new ArrayList<String>();
+
+		for (int no = 0; no < NUMBER_OF_QUEUED_REQUESTS; no++) {
+			for (String dataSet : dataSetIDs) {
+				final AnalysisRequest req = ODFAPITest.createAnalysisRequest(Arrays.asList(dataSet + UUID.randomUUID().toString()));
+				AnalysisResponse resp = analysisManager.runAnalysis(req);
+				req.setId(resp.getId());
+				requests.add(req);
+				idList.add(resp.getId());
+				responses.add(resp);
+			}
+		}
+		log.info("Parallel requests started: " + idList.toString());
+
+		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), requests.size());
+		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), responses.size());
+
+		// check that requests are processed in parallel: 
+		//   there must be a point in time where both requests are in status "active"
+		log.info("Polling for status of parallel request...");
+		boolean foundPointInTimeWhereBothRequestsAreActive = false;
+		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
+		List<AnalysisRequestStatus.State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
+		do {
+			int foundActive = 0;
+			allSingleStates.clear();
+			for (AnalysisRequest request : requests) {
+				final AnalysisRequestStatus.State state = analysisManager.getAnalysisRequestStatus(request.getId()).getState();
+				if (state == AnalysisRequestStatus.State.ACTIVE) {
+					log.info("ACTIVE: " + request.getId() + " foundactive: " + foundActive);
+					foundActive++;
+				} else {
+					log.info("NOT ACTIVE " + request.getId() + " _ " + state);
+				}
+				allSingleStates.add(state);
+			}
+			if (foundActive > 1) {
+				foundPointInTimeWhereBothRequestsAreActive = true;
+			}
+
+			maxPolls--;
+			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
+		} while (maxPolls > 0 && Utils.containsNone(allSingleStates, new AnalysisRequestStatus.State[] { AnalysisRequestStatus.State.ACTIVE, AnalysisRequestStatus.State.QUEUED }));
+
+		Assert.assertTrue(maxPolls > 0);
+		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
+		Assert.assertTrue(allSingleStates.containsAll(Arrays.asList(expectedState)));
+	}
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java
new file mode 100755
index 0000000..7a180d2
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/ParallelServiceTest.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.analysis.AnalysisManager;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.core.test.ODFTestLogger;
+import org.apache.atlas.odf.core.test.ODFTestcase;
+import org.apache.atlas.odf.core.test.controlcenter.ODFAPITest;
+
+public class ParallelServiceTest extends ODFTestcase {
+	private static final int NUMBER_OF_QUEUED_REQUESTS = 1;
+	Logger log = ODFTestLogger.get();
+
+	@Test
+	public void runDataSetsInParallelSuccess() throws Exception {
+		runDataSetsInParallelAndCheckResult(Arrays.asList(new String[] { "successID1", "successID2" }), State.FINISHED, State.FINISHED);
+	}
+
+	private void runDataSetsInParallelAndCheckResult(List<String> dataSetIDs, State... expectedState) throws Exception {
+		log.info("Running data sets in parallel: " + dataSetIDs);
+		log.info("Expected state: " + expectedState);
+		AnalysisManager analysisManager = new ODFFactory().create().getAnalysisManager();
+
+		List<AnalysisRequest> requests = new ArrayList<AnalysisRequest>();
+		List<AnalysisResponse> responses = new ArrayList<AnalysisResponse>();
+		List<String> idList = new ArrayList<String>();
+
+		for (int no = 0; no < NUMBER_OF_QUEUED_REQUESTS; no++) {
+			for (String dataSet : dataSetIDs) {
+				final AnalysisRequest req = ODFAPITest.createAnalysisRequest(Arrays.asList(dataSet + UUID.randomUUID().toString()));
+				AnalysisResponse resp = analysisManager.runAnalysis(req);
+				req.setId(resp.getId());
+				requests.add(req);
+				idList.add(resp.getId());
+				responses.add(resp);
+			}
+		}
+		log.info("Parallel requests started: " + idList.toString());
+
+		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), requests.size());
+		Assert.assertEquals(NUMBER_OF_QUEUED_REQUESTS * dataSetIDs.size(), responses.size());
+
+		// check that requests are processed in parallel: 
+		//   there must be a point in time where both requests are in status "active"
+		log.info("Polling for status of parallel request...");
+		boolean foundPointInTimeWhereBothRequestsAreActive = false;
+		int maxPolls = ODFAPITest.MAX_NUMBER_OF_POLLS;
+		List<State> allSingleStates = new ArrayList<AnalysisRequestStatus.State>();
+		do {
+			int foundActive = 0;
+			allSingleStates.clear();
+			for (AnalysisRequest request : requests) {
+				final State state = analysisManager.getAnalysisRequestStatus(request.getId()).getState();
+				if (state == State.ACTIVE) {
+					log.info("ACTIVE: " + request.getId() + " foundactive: " + foundActive);
+					foundActive++;
+				} else {
+					log.info("NOT ACTIVE " + request.getId() + " _ " + state);
+				}
+				allSingleStates.add(state);
+			}
+			if (foundActive > 1) {
+				foundPointInTimeWhereBothRequestsAreActive = true;
+			}
+
+			maxPolls--;
+			Thread.sleep(ODFAPITest.WAIT_MS_BETWEEN_POLLING);
+		} while (maxPolls > 0 && Utils.containsNone(allSingleStates, new State[] { State.ACTIVE, State.QUEUED }));
+
+		Assert.assertTrue(maxPolls > 0);
+		Assert.assertTrue(foundPointInTimeWhereBothRequestsAreActive);
+		Assert.assertTrue(allSingleStates.containsAll(Arrays.asList(expectedState)));
+	}
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java
new file mode 100755
index 0000000..5e3d97e
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestEnvironmentMessagingInitializer.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.test.TestEnvironmentInitializer;
+
+public class TestEnvironmentMessagingInitializer implements TestEnvironmentInitializer {
+
+	public TestEnvironmentMessagingInitializer() {
+	}
+	
+	public void start() {
+		Logger logger = Logger.getLogger(TestEnvironmentMessagingInitializer.class.getName());
+		try {
+			logger.info("Starting Test-Kafka during initialization...");
+			TestKafkaStarter starter = new TestKafkaStarter();
+			starter.startKafka();
+			logger.info("Test-Kafka initialized");
+		} catch (Exception exc) {
+			logger.log(Level.INFO, "Exception occurred while starting test kafka", exc);
+			throw new RuntimeException(exc);
+		}
+	}
+
+	@Override
+	public void stop() {
+		// Nothing to stop here: the embedded Kafka instance is kept running and shared across tests.
+	}
+
+	@Override
+	public String getName() {
+		return "Kafka1001";
+	}
+}
diff --git a/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java
new file mode 100755
index 0000000..1c3025e
--- /dev/null
+++ b/odf/odf-messaging/src/test/java/org/apache/atlas/odf/core/test/messaging/kafka/TestKafkaStarter.java
@@ -0,0 +1,306 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.test.messaging.kafka;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.BindException;
+import java.net.DatagramSocket;
+import java.net.ServerSocket;
+import java.rmi.NotBoundException;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkConnection;
+import org.apache.kafka.common.protocol.SecurityProtocol;
+import org.apache.wink.json4j.JSONObject;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.Watcher.Event.KeeperState;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooKeeper.States;
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+
+import org.apache.atlas.odf.core.Utils;
+
+import kafka.cluster.Broker;
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServerStartable;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import scala.collection.JavaConversions;
+import scala.collection.Seq;
+
+public class TestKafkaStarter {
+
+	public static boolean deleteRecursive(File path) throws FileNotFoundException {
+		if (!path.exists()) {
+			throw new FileNotFoundException(path.getAbsolutePath());
+		}
+		boolean ret = true;
+		if (path.isDirectory()) {
+			for (File f : path.listFiles()) {
+				ret = ret && deleteRecursive(f);
+			}
+		}
+		return ret && path.delete();
+	}
+
+	static Thread zookeeperThread = null;
+	static boolean kafkaStarted = false;
+	static Object lockObject = new Object();
+	static KafkaServerStartable kafkaServer = null;
+	static ZooKeeperServerMainWithShutdown zooKeeperServer = null;
+
+
+	boolean cleanData = true; // if true, all Zookeeper and Kafka data is deleted at server start
+
+	public boolean isCleanData() {
+		return cleanData;
+	}
+
+	public void setCleanData(boolean cleanData) {
+		this.cleanData = cleanData;
+	}
+
+	Logger logger = Logger.getLogger(TestKafkaStarter.class.getName());
+
+	void log(String s) {
+		logger.info(s);
+	}
+
+	int zookeeperStartupTime = 10000;
+	int kafkaStartupTime = 10000;
+
+	static class ZooKeeperServerMainWithShutdown extends ZooKeeperServerMain {
+		public void shutdown() {
+			super.shutdown();
+		}
+	}
+
+	private void startZookeeper() throws Exception {
+		log("Starting zookeeper");
+
+		final Properties zkProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties");
+		final String zkPort = (String) zkProps.get("clientPort");
+		if (zooKeeperServer == null) {
+			log("zookeeper properties: " + zkProps);
+			if (cleanData) {
+				String dataDir = zkProps.getProperty("dataDir");
+				log("Removing all data from zookeeper data dir " + dataDir);
+				File dir = new File(dataDir);
+				if (dir.exists()) {
+					if (!deleteRecursive(dir)) {
+						throw new IOException("Could not delete directory " + dataDir);
+					}
+				}
+			}
+			final ZooKeeperServerMainWithShutdown zk = new ZooKeeperServerMainWithShutdown();
+			final ServerConfig serverConfig = new ServerConfig();
+			log("Loading zookeeper config...");
+			QuorumPeerConfig zkConfig = new QuorumPeerConfig();
+			zkConfig.parseProperties(zkProps);
+			serverConfig.readFrom(zkConfig);
+
+			Runnable zookeeperStarter = new Runnable() {
+
+				@Override
+				public void run() {
+					try {
+						log("Now starting Zookeeper with API...");
+						zk.runFromConfig(serverConfig);
+					} catch (BindException ex) {
+						log("Embedded zookeeper could not be started, port is already in use. Trying to use external zookeeper");
+						ZooKeeper zk = null;
+						try {
+							zk = new ZooKeeper("localhost:" + zkPort, 5000, null);
+							if (zk.getState().equals(States.CONNECTED)) {
+								log("Using existing zookeeper running on port " + zkPort);
+								return;
+							} else {
+								throw new NotBoundException();
+							}
+						} catch (Exception zkEx) {
+							throw new RuntimeException("Could not connect to zookeeper on port " + zkPort + ". Please close all applications listening on this port.");
+						} finally {
+							if (zk != null) {
+								try {
+									zk.close();
+								} catch (InterruptedException e) {
+									logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
+								}
+							}
+						}
+					} catch (Exception e) {
+						e.printStackTrace();
+						throw new RuntimeException(e);
+					}
+
+				}
+			};
+
+			zookeeperThread = new Thread(zookeeperStarter);
+			zookeeperThread.setDaemon(true);
+			zookeeperThread.start();
+			log("Zookeeper start initiated");
+			zooKeeperServer = zk;
+		}
+		ZkConnection conn = new ZkConnection("localhost:" + zkPort);
+		final CountDownLatch latch = new CountDownLatch(1);
+		conn.connect(new Watcher() {
+
+			@Override
+			public void process(WatchedEvent event) {
+				log("Zookeeper event: " + event.getState());
+				if (event.getState().equals(KeeperState.SyncConnected)) {
+					log("Zookeeper server up and running");
+					latch.countDown();
+				}
+			}
+		});
+
+		boolean zkReady = latch.await(zookeeperStartupTime, TimeUnit.MILLISECONDS);
+		if (zkReady) {
+			log("Zookeeper initialized and started");
+		} else {
+			logger.severe("Zookeeper could not be initialized within " + (zookeeperStartupTime / 1000) + " sec");
+		}
+		conn.close();
+	}
+
+	public boolean isRunning() {
+		return kafkaStarted;
+	}
+
+	public void startKafka() throws Exception {
+		synchronized (lockObject) {
+			if (kafkaStarted) {
+				log("Kafka already running");
+				return;
+			}
+			this.startZookeeper();
+
+			log("Starting Kafka server...");
+			Properties kafkaProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties");
+			log("Kafka properties: " + kafkaProps);
+			KafkaConfig kafkaConfig = new KafkaConfig(kafkaProps);
+			int kafkaPort = kafkaConfig.port();
+			if (cleanData && isPortAvailable(kafkaPort)) {
+				String logDir = kafkaProps.getProperty("log.dirs");
+				log("Removing all data from kafka log dir: " + logDir);
+				File dir = new File(logDir);
+				if (dir.exists()) {
+					if (!deleteRecursive(dir)) {
+						throw new IOException("Kafka logDir could not be deleted: " + logDir);
+					}
+				}
+			}
+			if (!isPortAvailable(kafkaPort)) {
+				log("Kafka port " + kafkaPort + " is already in use. "
+						+ "Checking whether zookeeper has a broker registered on this port, i.e. whether an existing Kafka instance is using it.");
+				ZooKeeper zk = new ZooKeeper(kafkaConfig.zkConnect(), 10000, null);
+				try {
+					List<String> ids = zk.getChildren("/brokers/ids", false);
+					if (ids != null && !ids.isEmpty()) {
+						for (String id : ids) {
+							String brokerInfo = new String(zk.getData("/brokers/ids/" + id, false, null), "UTF-8");
+							JSONObject broker = new JSONObject(brokerInfo);
+							int port = Integer.parseInt(String.valueOf(broker.get("port")));
+							if (port == kafkaPort) {
+								log("Using externally started kafka broker on port " + port);
+								kafkaStarted = true;
+								return;
+							}
+						}
+					}
+				} catch (NoNodeException ex) {
+					log("No brokers registered with zookeeper!");
+					throw new RuntimeException("Kafka broker port " + kafkaPort
+							+ " not available and no broker found! Please close all running applications listening on this port");
+				} finally {
+					if (zk != null) {
+						try {
+							zk.close();
+						} catch (InterruptedException e) {
+							logger.log(Level.WARNING, "An error occurred closing the zk connection", e);
+						}
+					}
+				}
+			}
+			KafkaServerStartable kafka = KafkaServerStartable.fromProps(kafkaProps);
+			kafka.startup();
+			log("Kafka server start initiated");
+
+			kafkaServer = kafka;
+			log("Give Kafka a maximum of " + kafkaStartupTime + " ms to start");
+			ZkClient zk = new ZkClient(kafkaConfig.zkConnect(), 10000, 5000, ZKStringSerializer$.MODULE$);
+			int maxRetryCount = kafkaStartupTime / 1000;
+			int cnt = 0;
+			while (cnt < maxRetryCount) {
+				cnt++;
+				Seq<Broker> allBrokersInCluster = new ZkUtils(zk, new ZkConnection(kafkaConfig.zkConnect()), false).getAllBrokersInCluster();
+				List<Broker> brokers = JavaConversions.seqAsJavaList(allBrokersInCluster);
+				for (Broker broker : brokers) {
+					if (broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port() == kafkaPort) {
+						log("Broker is registered, Kafka is available after " + cnt + " seconds");
+						kafkaStarted = true;
+						return;
+					}
+				}
+				Thread.sleep(1000);
+			}
+			logger.severe("Kafka broker was not started after " + kafkaStartupTime + " ms");
+		}
+	}
+
+	public void shutdownKafka() {
+		// Intentionally a no-op: the static embedded servers are kept running so they can be reused across tests.
+	}
+
+	boolean isPortAvailable(int port) {
+		ServerSocket ss = null;
+		DatagramSocket ds = null;
+		try {
+			// Try to bind both a TCP and a UDP socket on the port; if both succeed, the port is free.
+			ss = new ServerSocket(port);
+			ss.setReuseAddress(true);
+			ds = new DatagramSocket(port);
+			ds.setReuseAddress(true);
+			return true;
+		} catch (IOException e) {
+			// Expected if the port is already in use; fall through and report the port as taken.
+		} finally {
+			if (ds != null) {
+				ds.close();
+			}
+
+			if (ss != null) {
+				try {
+					ss.close();
+				} catch (IOException e) {
+					// Ignore: the socket was only opened to probe availability.
+				}
+			}
+		}
+
+		return false;
+	}
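+
+	// Illustrative usage sketch: starts the embedded zookeeper/Kafka pair and reports
+	// whether the broker came up. Assumes the test-embedded-*.properties files are on
+	// the classpath (as in the test resources below).
+	public static void main(String[] args) throws Exception {
+		TestKafkaStarter starter = new TestKafkaStarter();
+		starter.setCleanData(true); // wipe the zookeeper and Kafka data dirs before starting
+		starter.startKafka();
+		System.out.println("Embedded Kafka running: " + starter.isRunning());
+	}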
+}
diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties
new file mode 100755
index 0000000..4769c95
--- /dev/null
+++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-kafka.properties
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See kafka.server.KafkaConfig for additional details and defaults.
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+listeners=PLAINTEXT://:9092
+
+# The port the socket server listens on
+# port=9092
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+#host.name=localhost
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=3
+ 
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs=/tmp/odf-embedded-test-kafka/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk. 
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=24
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according 
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000
diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties
new file mode 100755
index 0000000..7234e9c
--- /dev/null
+++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The directory where the snapshot is stored.
+dataDir=/tmp/odf-embedded-test-kafka/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
+
diff --git a/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..69f1860
--- /dev/null
+++ b/odf/odf-messaging/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,18 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+## Use for tests only: the entries below override the default implementations with test mocks.
+
+org.apache.atlas.odf.core.store.ODFConfigurationStorage=org.apache.atlas.odf.core.test.store.MockConfigurationStorage
+org.apache.atlas.odf.api.spark.SparkServiceExecutor=org.apache.atlas.odf.core.test.spark.MockSparkServiceExecutor
+org.apache.atlas.odf.core.notification.NotificationManager=org.apache.atlas.odf.core.test.notification.TestNotificationManager
diff --git a/odf/odf-spark-example-application/.gitignore b/odf/odf-spark-example-application/.gitignore
new file mode 100755
index 0000000..d523581
--- /dev/null
+++ b/odf/odf-spark-example-application/.gitignore
@@ -0,0 +1,20 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+.DS_Store
+/bin/
diff --git a/odf/odf-spark-example-application/pom.xml b/odf/odf-spark-example-application/pom.xml
new file mode 100755
index 0000000..d036d44
--- /dev/null
+++ b/odf/odf-spark-example-application/pom.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-spark-example-application</artifactId>
+	<packaging>jar</packaging>
+	<name>odf-spark-example-application</name>
+	<build>
+		<plugins>
+			<plugin>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<version>3.3</version>
+				<configuration>
+					<source>1.7</source>
+					<target>1.7</target>
+				</configuration>
+			</plugin>
+			<plugin>
+				<artifactId>maven-assembly-plugin</artifactId>
+				<executions>
+					<execution>
+						<phase>package</phase>
+						<goals>
+							<goal>single</goal>
+						</goals>
+					</execution>
+				</executions>
+				<configuration>
+					<descriptorRefs>
+						<descriptorRef>jar-with-dependencies</descriptorRef>
+					</descriptorRefs>
+				</configuration>
+			</plugin>
+		</plugins>
+
+	</build>
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-sql_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+		</dependency>
+		<dependency> <!-- Spark dependency -->
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-core_2.11</artifactId>
+			<version>2.1.0</version>
+			<scope>provided</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+	</dependencies>
+</project>
diff --git a/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java
new file mode 100755
index 0000000..f5f7b70
--- /dev/null
+++ b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SparkDiscoveryServiceExample.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.spark;
+
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.spark.SparkDiscoveryServiceBase;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+
+import org.apache.atlas.odf.api.spark.SparkDiscoveryService;
+import org.apache.atlas.odf.api.spark.SparkUtils;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse.ResponseCode;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+
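+/**
+ * Example of a Spark discovery service using the "Generic" interface type: ODF
+ * instantiates this class by name, injects the SparkSession and metadata store
+ * (via SparkDiscoveryServiceBase), and calls checkDataSet() and runAnalysis().
+ */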
+public class SparkDiscoveryServiceExample extends SparkDiscoveryServiceBase implements SparkDiscoveryService {
+	static Logger logger = Logger.getLogger(SparkDiscoveryServiceExample.class.getName());
+
+	@Override
+	public DataSetCheckResult checkDataSet(DataSetContainer dataSetContainer) {
+		logger.log(Level.INFO, "Checking data set access.");
+		DataSetCheckResult checkResult = new DataSetCheckResult();
+		checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
+		Dataset<Row> df = SparkUtils.createDataFrame(this.spark, dataSetContainer, this.mds);
+		// Print first rows to check whether data frame can be accessed
+		df.show(10);
+		return checkResult;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceRequest request) {
+		logger.log(Level.INFO, "Starting discovery service.");
+		Dataset<Row> df = SparkUtils.createDataFrame(spark, request.getDataSetContainer(), this.mds);
+		Map<String,Dataset<Row>> annotationDataFrameMap = SummaryStatistics.processDataFrame(this.spark, df, null);
+		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+		response.setCode(ResponseCode.OK);
+		response.setDetails("Discovery service successfully completed.");
+		response.setResult(SparkUtils.createAnnotationsFromDataFrameMap(request.getDataSetContainer(), annotationDataFrameMap, this.mds));
+		return response;
+	}
+}
diff --git a/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java
new file mode 100755
index 0000000..a7d1542
--- /dev/null
+++ b/odf/odf-spark-example-application/src/main/java/org/apache/atlas/odf/core/spark/SummaryStatistics.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.spark;
+
+import org.apache.atlas.odf.api.spark.SparkUtils;
+import org.apache.spark.SparkFiles;
+
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+
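+/**
+ * Example of a Spark discovery service using the "DataFrame" interface type: ODF
+ * reflectively invokes the static {@code processDataFrame(SparkSession, Dataset<Row>, String[])}
+ * method with a data frame created from the requested data set.
+ */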
+public class SummaryStatistics {
+	static Logger logger = Logger.getLogger(SummaryStatistics.class.getName());
+	private static final String CSV_FILE_PARAMETER = "-dataFile=";
+	// The following constant is defined in class DiscoveryServiceSparkEndpoint but is duplicated here to avoid a dependency on the ODF code:
+	private static final String ANNOTATION_PROPERTY_COLUMN_NAME = "ODF_ANNOTATED_COLUMN";
+
+	// The main method is only available for testing purposes and is not called by ODF
+	public static void main(String[] args) {
+		if ((args.length == 0) || (args[0] == null) || (!args[0].startsWith(CSV_FILE_PARAMETER))) {
+			System.out.println(MessageFormat.format("Error: Spark Application Parameter '{0}' is missing.", CSV_FILE_PARAMETER));
+			System.exit(1);
+		}
+		logger.log(Level.INFO, "Running spark launcher with arguments: " + args[0]);
+		String dataFilePath = SparkFiles.get(args[0].replace(CSV_FILE_PARAMETER, ""));
+		logger.log(Level.INFO, "Data file path is " + dataFilePath);
+
+		// Create Spark session
+		SparkSession spark = SparkSession.builder().master("local").appName("ODF Spark example application").getOrCreate();
+
+		// Read CSV file into data frame
+		Dataset<Row> df = spark.read()
+		    .format("com.databricks.spark.csv")
+		    .option("inferSchema", "true")
+		    .option("header", "true")
+		    .load(dataFilePath);
+
+		// Run actual job and print result
+		Map<String, Dataset<Row>> annotationDataFrameMap = null;
+		try {
+			annotationDataFrameMap = processDataFrame(spark, df, args);
+		} catch (Exception e) {
+			logger.log(Level.INFO, MessageFormat.format("An error occurred while processing data set {0}:", args[0]), e);
+		} finally {
+			// Close and stop spark context
+			spark.close();
+			spark.stop();
+		}
+		if (annotationDataFrameMap == null) {
+			System.exit(1);
+		} else {
+			// Print all annotationDataFrames for all annotation types to stdout
+			for (Map.Entry<String, Dataset<Row>> entry : annotationDataFrameMap.entrySet()) {
+				logger.log(Level.INFO, "Result data frame for annotation type " + entry.getKey() + ":");
+				entry.getValue().show();
+			}
+		}
+	}
+
+	// The following method contains the actual implementation of the ODF Spark discovery service
+	public static Map<String,Dataset<Row>> processDataFrame(SparkSession spark, Dataset<Row> df, String[] args) {
+		logger.log(Level.INFO, "Started summary statistics Spark application.");
+		Map<String, Dataset<Row>> resultMap = new HashMap<String, Dataset<Row>>();
+
+		// Print input data set
+		df.show();
+
+		// Create column annotation data frame that contains basic data frame statistics
+		Dataset<Row> dfStatistics = df.describe();
+
+		// Rename "summary" column to ANNOTATION_PROPERTY_COLUMN_NAME
+		String[] columnNames = dfStatistics.columns();
+		columnNames[0] = ANNOTATION_PROPERTY_COLUMN_NAME;
+		Dataset<Row> summaryStatistics = dfStatistics.toDF(columnNames);
+		summaryStatistics.show();
+		String columnAnnotationTypeName = "SparkSummaryStatisticsAnnotation";
+
+		// Transpose table to turn it into format required by ODF
+		Dataset<Row> columnAnnotationDataFrame = SparkUtils.transposeDataFrame(spark, summaryStatistics);
+		columnAnnotationDataFrame.show();
+
+		// Create table annotation that contains the data frame's column count
+		String tableAnnotationTypeName = "SparkTableAnnotation";
+		Dataset<Row> tableAnnotationDataFrame = columnAnnotationDataFrame.select(new Column("count")).limit(1);
+		tableAnnotationDataFrame.show();
+
+		// Add annotation data frames to result map
+		resultMap.put(columnAnnotationTypeName, columnAnnotationDataFrame);
+		resultMap.put(tableAnnotationTypeName, tableAnnotationDataFrame);
+
+		logger.log(Level.INFO, "Spark job finished.");
+		return resultMap;
+	}
+}
diff --git a/odf/odf-spark/.gitignore b/odf/odf-spark/.gitignore
new file mode 100755
index 0000000..cde346c
--- /dev/null
+++ b/odf/odf-spark/.gitignore
@@ -0,0 +1,19 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+.DS_Store
diff --git a/odf/odf-spark/pom.xml b/odf/odf-spark/pom.xml
new file mode 100755
index 0000000..378f280
--- /dev/null
+++ b/odf/odf-spark/pom.xml
@@ -0,0 +1,242 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-spark</artifactId>
+	<packaging>jar</packaging>
+	<name>odf-spark</name>
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-store</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>test</scope>
+		</dependency>
+		<!-- Workaround: Add odf-spark-example-application because dynamic jar loading does not seem to work on the IBM JDK -->
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-spark-example-application</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-launcher_2.11</artifactId>
+			<version>2.1.0</version>
+		</dependency>
+		<dependency>
+			<groupId>commons-io</groupId>
+			<artifactId>commons-io</artifactId>
+			<version>2.4</version>
+		</dependency>
+		<!-- The following Spark dependencies are needed for testing only. -->
+		<!-- Nevertheless, they have to be added as compile dependencies in order to become available to the SDPFactory. -->
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-core_2.11</artifactId>
+			<version>2.1.0</version>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.spark</groupId>
+			<artifactId>spark-sql_2.11</artifactId>
+			<version>2.1.0</version>
+			<exclusions>
+				<exclusion>
+					<groupId>commons-codec</groupId>
+					<artifactId>commons-codec</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+	</dependencies>
+	<build>
+		<resources>
+			<resource>
+				<directory>${project.build.directory}/downloads</directory>
+			</resource>
+		</resources>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-jar-plugin</artifactId>
+				<version>2.6</version>
+				<executions>
+					<execution>
+						<goals>
+							<goal>test-jar</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+					</systemPropertyVariables>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-dependency-plugin</artifactId>
+				<version>2.4</version>
+				<executions>
+					<execution>
+						<id>download-jar-file</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>copy</goal>
+						</goals>
+						<configuration>
+							<artifactItems>
+								<artifactItem>
+									<groupId>org.apache.atlas.odf</groupId>
+									<artifactId>odf-api</artifactId>
+									<version>1.2.0-SNAPSHOT</version>
+									<type>jar</type>
+									<overWrite>true</overWrite>
+									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
+								</artifactItem>
+								<artifactItem>
+									<groupId>org.apache.atlas.odf</groupId>
+									<artifactId>odf-spark-example-application</artifactId>
+									<version>1.2.0-SNAPSHOT</version>
+									<type>jar</type>
+									<overWrite>true</overWrite>
+									<outputDirectory>/tmp/odf-spark</outputDirectory>
+								</artifactItem>
+								<artifactItem>
+									<groupId>org.apache.atlas.odf</groupId>
+									<artifactId>odf-spark-example-application</artifactId>
+									<version>1.2.0-SNAPSHOT</version>
+									<type>jar</type>
+									<overWrite>true</overWrite>
+									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
+								</artifactItem>
+								<artifactItem>
+									<groupId>org.apache.wink</groupId>
+									<artifactId>wink-json4j</artifactId>
+									<version>1.4</version>
+									<type>jar</type>
+									<overWrite>true</overWrite>
+									<outputDirectory>${project.build.directory}/downloads/META-INF/spark</outputDirectory>
+								</artifactItem>
+							</artifactItems>
+							<includes>**/*</includes>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
+	<profiles>
+		<profile>
+			<id>integration-tests</id>
+			<activation>
+				<property>
+					<name>reduced-tests</name>
+					<value>!true</value>
+				</property>
+			</activation>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-failsafe-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.logspec>${odf.integrationtest.logspec}</odf.logspec>
+							</systemPropertyVariables>
+							<dependenciesToScan>
+								<dependency>org.apache.atlas.odf:odf-core</dependency>
+							</dependenciesToScan>
+							<includes>
+								<include>**/integrationtest/**/SparkDiscoveryServiceLocalTest.java</include>
+							</includes>
+						</configuration>
+						<executions>
+							<execution>
+								<id>integration-test</id>
+								<goals>
+									<goal>integration-test</goal>
+								</goals>
+							</execution>
+							<execution>
+								<id>verify</id>
+								<goals>
+									<goal>verify</goal>
+								</goals>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
+</project>
diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java
new file mode 100755
index 0000000..84ae80c
--- /dev/null
+++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/LocalSparkServiceExecutor.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.spark;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.reflect.Constructor;
+import java.text.MessageFormat;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceResponse;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.spark.SparkDiscoveryService;
+import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.spark.SparkUtils;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ * This class calls the actual Spark discovery services depending on the type of interface they implement.
+ * The class is used to run a Spark discovery service either on a local Spark cluster ({@link SparkServiceExecutorImpl})
+ * or on a remote Spark cluster ({@link SparkApplicationStub}).
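+ *
+ * <p>A minimal usage sketch (illustrative only; the SparkSession, metadata store,
+ * service properties, and request are assumed to be provided by the caller):
+ * <pre>{@code
+ * LocalSparkServiceExecutor executor = new LocalSparkServiceExecutor();
+ * executor.setSparkSession(spark);  // pre-built local SparkSession
+ * executor.setMetadataStore(mds);   // ODF metadata store
+ * DiscoveryServiceSyncResponse response = executor.runAnalysis(serviceProps, request);
+ * }</pre>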
+ */
+
+public class LocalSparkServiceExecutor implements SparkServiceExecutor {
+	private Logger logger = Logger.getLogger(LocalSparkServiceExecutor.class.getName());
+	private SparkSession spark;
+	private MetadataStore mds;
+
+	void setSparkSession(SparkSession spark) {
+		this.spark = spark;
+	}
+
+	void setMetadataStore(MetadataStore mds) {
+		this.mds = mds;
+	}
+
+	@Override
+	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsProp, DataSetContainer container) {
+		DiscoveryServiceSparkEndpoint endpoint;
+		try {
+			endpoint = JSONUtils.convert(dsProp.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+		} catch (JSONException e1) {
+			throw new RuntimeException(e1);
+		}
+		DataSetCheckResult checkResult = new DataSetCheckResult();
+		try {
+			SERVICE_INTERFACE_TYPE inputMethod = endpoint.getInputMethod();
+			if (inputMethod.equals(SERVICE_INTERFACE_TYPE.DataFrame)) {
+				MetaDataObject dataSet = container.getDataSet();
+				if (!(dataSet instanceof RelationalDataSet)) {
+					checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
+					checkResult.setDetails("This service can only process relational data sets.");
+				} else {
+					checkResult.setDataAccess(DataSetCheckResult.DataAccess.Possible);
+					Dataset<Row> df = SparkUtils.createDataFrame(this.spark, container, this.mds);
+					// Print first rows to check whether data frame can be accessed
+					df.show(10);
+				}
+			} else if (inputMethod.equals(SERVICE_INTERFACE_TYPE.Generic)) {
+				Class<?> clazz = Class.forName(endpoint.getClassName());
+				Constructor<?> cons = clazz.getConstructor();
+				SparkDiscoveryService service = (SparkDiscoveryService) cons.newInstance();
+				service.setMetadataStore(this.mds);
+				service.setSparkSession(this.spark);
+				checkResult = service.checkDataSet(container);
+			} else {
+				throw new RuntimeException(MessageFormat.format("Unsupported interface type {0}.", inputMethod));
+			}
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "Access to data set not possible.", e);
+			checkResult.setDataAccess(DataSetCheckResult.DataAccess.NotPossible);
+			checkResult.setDetails(getExceptionAsString(e));
+		} finally {
+			this.spark.close();
+			this.spark.stop();
+		}
+		return checkResult;
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsProp, DiscoveryServiceRequest request) {
+		DiscoveryServiceSyncResponse response = new DiscoveryServiceSyncResponse();
+		response.setDetails("Annotations created successfully");
+		response.setCode(DiscoveryServiceResponse.ResponseCode.OK);
+		try {
+			DiscoveryServiceSparkEndpoint endpoint = JSONUtils.convert(dsProp.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+			Class<?> clazz = Class.forName(endpoint.getClassName());
+			DataSetContainer container = request.getDataSetContainer();
+			String[] optionalArgs = {}; // For future use
+			SERVICE_INTERFACE_TYPE inputMethod = endpoint.getInputMethod();
+
+			if (inputMethod.equals(SERVICE_INTERFACE_TYPE.DataFrame)) {
+				if (!(container.getDataSet() instanceof RelationalDataSet)) {
+					throw new RuntimeException("This service can only process relational data sets (DataFile or Table).");
+				}
+				Dataset<Row> df = SparkUtils.createDataFrame(this.spark, container, this.mds);
+				@SuppressWarnings("unchecked")
+				Map<String, Dataset<Row>> annotationDataFrameMap = (Map<String, Dataset<Row>>) clazz.getMethod("processDataFrame", SparkSession.class, Dataset.class, String[].class).invoke(null, this.spark, df, (Object[]) optionalArgs);
+				response.setResult(SparkUtils.createAnnotationsFromDataFrameMap(container, annotationDataFrameMap, this.mds));
+			} else if (inputMethod.equals(SERVICE_INTERFACE_TYPE.Generic)) {
+				Constructor<?> cons = clazz.getConstructor();
+				SparkDiscoveryService service = (SparkDiscoveryService) cons.newInstance();
+				service.setMetadataStore(this.mds);
+				service.setSparkSession(this.spark);
+				response = service.runAnalysis(request);
+			} else {
+				throw new RuntimeException(MessageFormat.format("Unsupported interface type {0}.", inputMethod));
+			}
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "Error running discovery service.", e);
+			response.setDetails(getExceptionAsString(e));
+			response.setCode(DiscoveryServiceResponse.ResponseCode.UNKNOWN_ERROR);
+		} finally {
+			this.spark.close();
+			this.spark.stop();
+		}
+		return response;
+	}
+
+	public static String getExceptionAsString(Throwable exc) {
+		StringWriter sw = new StringWriter();
+		PrintWriter pw = new PrintWriter(sw);
+		exc.printStackTrace(pw);
+		String st = sw.toString();
+		return st;
+	}
+}
diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java
new file mode 100755
index 0000000..81fea2c
--- /dev/null
+++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkJars.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.spark;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.text.MessageFormat;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+
+import org.apache.atlas.odf.core.Utils;
+
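+/**
+ * Helper for materializing Spark application jars: copies a classpath resource or a
+ * URL into an executable temporary file, or returns the jar contents as a byte array.
+ */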
+public class SparkJars {
+	private static Logger logger = Logger.getLogger(SparkJars.class.getName());
+
+	public String getResourceAsJarFile(String resource) {
+		ClassLoader cl = this.getClass().getClassLoader();
+		InputStream inputStream = cl.getResourceAsStream(resource);
+		if (inputStream == null) {
+        	String msg = MessageFormat.format("Resource {0} was not found.", resource);
+        	logger.log(Level.WARNING, msg);
+        	throw new RuntimeException(msg);
+		}
+		String tempFilePath = null;
+		try {
+			File tempFile = File.createTempFile("driver", "jar");
+			tempFilePath = tempFile.getAbsolutePath();
+			logger.log(Level.INFO, "Creating temporary file " + tempFilePath);
+			FileOutputStream outputStream = new FileOutputStream(tempFile);
+			IOUtils.copy(inputStream, outputStream);
+			outputStream.close();
+			inputStream.close();
+			Utils.runSystemCommand("chmod 755 " + tempFilePath);
+		} catch (IOException e) {
+        	String msg = MessageFormat.format("Error creating temporary file from resource {0}: ", resource);
+        	logger.log(Level.WARNING, msg, e);
+        	throw new RuntimeException(msg + Utils.getExceptionAsString(e));
+		}
+		return tempFilePath;
+	}
+
+	public String getUrlasJarFile(String urlString) {
+		try {
+		    File tempFile = File.createTempFile("driver", "jar");
+	    	logger.log(Level.INFO, "Creating temporary file " + tempFile);
+			FileUtils.copyURLToFile(new URL(urlString), tempFile);
+			Utils.runSystemCommand("chmod 755 " + tempFile.getAbsolutePath());
+			return tempFile.getAbsolutePath();
+		} catch (MalformedURLException e) {
+			String msg = MessageFormat.format("An invalid Spark application URL {0} was provided: ", urlString);
+			logger.log(Level.WARNING, msg, e);
+			throw new RuntimeException(msg + Utils.getExceptionAsString(e));
+		} catch (IOException e) {
+			logger.log(Level.WARNING, "Error processing Spark application jar file.", e);
+			throw new RuntimeException("Error processing Spark application jar file: " + Utils.getExceptionAsString(e));
+		}
+	}
+
+	public byte[] getFileAsByteArray(String resourceOrURL) {
+        try {
+        	InputStream inputStream;
+        	if (isValidUrl(resourceOrURL)) {
+            	inputStream = new URL(resourceOrURL).openStream();
+        	} else {
+        		ClassLoader cl = this.getClass().getClassLoader();
+        		inputStream = cl.getResourceAsStream(resourceOrURL);
+        		if (inputStream == null) {
+                	String msg = MessageFormat.format("Resource {0} was not found.", resourceOrURL);
+                	logger.log(Level.WARNING, msg);
+                	throw new RuntimeException(msg);
+        		}
+        	}
+        	byte[] bytes = IOUtils.toByteArray(inputStream);
+        	return bytes;
+        } catch (IOException e) {
+        	String msg = MessageFormat.format("Error converting jar file {0} into byte array: ", resourceOrURL);
+        	logger.log(Level.WARNING, msg, e);
+        	throw new RuntimeException(msg + Utils.getExceptionAsString(e));
+        }
+	}
+
+	public static boolean isValidUrl(String urlString) {
+		try {
+			new URL(urlString);
+			return true;
+		} catch (java.net.MalformedURLException exc) {
+			// Expected exception if URL is not valid
+			return false;
+		}
+	}
+}
diff --git a/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java
new file mode 100755
index 0000000..720343b
--- /dev/null
+++ b/odf/odf-spark/src/main/java/org/apache/atlas/odf/core/spark/SparkServiceExecutorImpl.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.spark;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.text.MessageFormat;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.discoveryservice.DataSetCheckResult;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRequest;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.api.spark.SparkServiceExecutor;
+import org.apache.spark.sql.SparkSession;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.datasets.DataSetContainer;
+import org.apache.atlas.odf.api.discoveryservice.sync.DiscoveryServiceSyncResponse;
+import org.apache.atlas.odf.api.settings.SparkConfig;
+import org.apache.atlas.odf.json.JSONUtils;
+
+/**
+ * Calls the appropriate implementation (local vs. remote) of the {@link SparkServiceExecutor} depending on the current {@link SparkConfig}.
+ * Prepares the local Spark cluster to be used in unit and integration tests.
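+ *
+ * <p>Illustrative sketch of how the Spark master URL is resolved from the ODF
+ * settings (the same call chain used in getExecutor() below):
+ * <pre>{@code
+ * SettingsManager settings = new ODFFactory().create().getSettingsManager();
+ * SparkConfig sparkConfig = settings.getODFSettings().getSparkConfig();
+ * String master = sparkConfig.getClusterMasterUrl();
+ * }</pre>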
+ */
+
+public class SparkServiceExecutorImpl implements SparkServiceExecutor {
+	private Logger logger = Logger.getLogger(SparkServiceExecutorImpl.class.getName());
+
+	@Override
+	public DataSetCheckResult checkDataSet(DiscoveryServiceProperties dsri, DataSetContainer dataSetContainer) {
+		return this.getExecutor(dsri).checkDataSet(dsri, dataSetContainer);
+	}
+
+	@Override
+	public DiscoveryServiceSyncResponse runAnalysis(DiscoveryServiceProperties dsri, DiscoveryServiceRequest request) {
+		return this.getExecutor(dsri).runAnalysis(dsri, request);
+	}
+
+	private SparkServiceExecutor getExecutor(DiscoveryServiceProperties dsri) {
+		SettingsManager config = new ODFFactory().create().getSettingsManager();
+		DiscoveryServiceSparkEndpoint endpoint;
+		try {
+			endpoint = JSONUtils.convert(dsri.getEndpoint(), DiscoveryServiceSparkEndpoint.class);
+		} catch (JSONException e1) {
+			throw new RuntimeException(e1);
+		}
+
+		SparkConfig sparkConfig = config.getODFSettings().getSparkConfig();
+		if (sparkConfig == null) {
+			String msg = "No Spark service is configured. Please manually register Spark service or bind a Spark service to your ODF Bluemix app.";
+			logger.log(Level.SEVERE, msg);
+			throw new RuntimeException(msg);
+		} else {
+			logger.log(Level.INFO, "Using local Spark cluster {0}.", sparkConfig.getClusterMasterUrl());
+			SparkSession spark = SparkSession.builder().master(sparkConfig.getClusterMasterUrl()).appName(dsri.getName()).getOrCreate();
+			SparkJars sparkJars = new SparkJars();
+			try {
+			    // Load jar file containing the Spark job to be started
+			    URLClassLoader classLoader = (URLClassLoader)ClassLoader.getSystemClassLoader();
+				Method method = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
+			    method.setAccessible(true);
+			    String applicationJarFile;
+				if (SparkJars.isValidUrl(endpoint.getJar())) {
+					applicationJarFile = sparkJars.getUrlasJarFile(endpoint.getJar());
+				} else {
+					applicationJarFile = sparkJars.getResourceAsJarFile(endpoint.getJar());
+				}
+				logger.log(Level.INFO, "Using application jar file {0}.", applicationJarFile);
+			    method.invoke(classLoader, new URL("file:" + applicationJarFile));
+			} catch (Exception e) {
+				String msg = MessageFormat.format("Error loading jar file {0} implementing the Spark discovery service: ", endpoint.getJar());
+				logger.log(Level.WARNING, msg, e);
+				spark.close();
+				spark.stop();
+				throw new RuntimeException(msg, e);
+			}
+			LocalSparkServiceExecutor executor = new LocalSparkServiceExecutor();
+			executor.setSparkSession(spark);
+			executor.setMetadataStore(new ODFFactory().create().getMetadataStore());
+		    return executor;
+		}
+	}
+}
diff --git a/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..b050f50
--- /dev/null
+++ b/odf/odf-spark/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,14 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+org.apache.atlas.odf.api.spark.SparkServiceExecutor=org.apache.atlas.odf.core.spark.SparkServiceExecutorImpl
diff --git a/odf/odf-store/.gitignore b/odf/odf-store/.gitignore
new file mode 100755
index 0000000..ea5ddb8
--- /dev/null
+++ b/odf/odf-store/.gitignore
@@ -0,0 +1,5 @@
+.settings
+target
+.classpath
+.project
+.factorypath
\ No newline at end of file
diff --git a/odf/odf-store/pom.xml b/odf/odf-store/pom.xml
new file mode 100755
index 0000000..3d0a93d
--- /dev/null
+++ b/odf/odf-store/pom.xml
@@ -0,0 +1,87 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-store</artifactId>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.zookeeper</groupId>
+			<artifactId>zookeeper</artifactId>
+			<version>3.4.6</version>
+			<scope>compile</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+					</systemPropertyVariables>
+					<dependenciesToScan>
+						<dependency>org.apache.atlas.odf:odf-core</dependency>
+					</dependenciesToScan>
+					<includes>
+					    <include>**/configuration/**/*.java</include>
+						<include>**/ZookeeperConfigurationStorageTest.java</include>
+					</includes>
+				</configuration>
+			</plugin>
+		</plugins>
+	</build>
+
+</project>
diff --git a/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java b/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java
new file mode 100755
index 0000000..3ea9927
--- /dev/null
+++ b/odf/odf-store/src/main/java/org/apache/atlas/odf/core/store/zookeeper34/ZookeeperConfigurationStorage.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store.zookeeper34;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.text.MessageFormat;
+import java.util.HashSet;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.core.Environment;
+import org.apache.atlas.odf.core.ODFInternalFactory;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.Code;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+import org.apache.atlas.odf.core.store.ODFConfigurationStorage;
+
+public class ZookeeperConfigurationStorage implements ODFConfigurationStorage {
+	private Logger logger = Logger.getLogger(ZookeeperConfigurationStorage.class.getName());
+	static final String ZOOKEEPER_CONFIG_PATH = "/odf/config";
+	static String configCache = null; // cache is a string so that the object is not accidentally modified
+	static Object configCacheLock = new Object();
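+	// ids of configuration changes that have been issued but not yet confirmed; see the pending-change methods at the bottom of this class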
+	static HashSet<String> pendingConfigChanges = new HashSet<String>();
+
+	String zookeeperString;
+
+	public ZookeeperConfigurationStorage() {
+		zookeeperString = new ODFInternalFactory().create(Environment.class).getZookeeperConnectString();
+	}
+
+	public void clearCache() {
+		synchronized (configCacheLock) {
+			configCache = null;
+		}
+	}
+	
+	@Override
+	public void storeConfig(ConfigContainer config) {
+		synchronized (configCacheLock) {
+			ZooKeeper zk = null;
+			String configTxt = null;
+			try {
+				configTxt = JSONUtils.toJSON(config);
+				zk = getZkConnectionSynchronously();
+				if (zk.exists(getZookeeperConfigPath(), false) == null) {
+					//config file doesn't exist in zookeeper yet, write default config
+					logger.log(Level.WARNING, "Zookeeper config not found - creating it before writing: {0}", configTxt);
+					initializeConfiguration(zk, configTxt);
+				}
+				zk.setData(getZookeeperConfigPath(), configTxt.getBytes("UTF-8"), -1);
+				configCache = configTxt;
+			} catch (InterruptedException e) {
+				throw new RuntimeException("A zookeeper connection could not be established in time to write the settings", e);
+			} catch (KeeperException e) {
+				if (Code.NONODE.equals(e.code())) {
+					logger.info("Settings could not be written because the config node does not exist yet; creating it now.");
+					initializeConfiguration(zk, configTxt);
+					configCache = configTxt;
+					return;
+				}
+				//This should never happen: only NoNode or BadVersion codes are expected here, and because the version is ignored (-1), BadVersion cannot occur
+				throw new RuntimeException("The settings could not be written due to an unexpected Zookeeper error", e);
+			} catch (UnsupportedEncodingException e) {
+				throw new RuntimeException("The settings could not be written because the UTF-8 encoding is not supported", e);
+			} catch (JSONException e) {
+				throw new RuntimeException("Configuration is not valid", e);
+			} finally {
+				if (zk != null) {
+					try {
+						zk.close();
+					} catch (InterruptedException e) {
+						e.printStackTrace();
+					}
+				}
+			}
+		}
+	}
+
+	@Override
+	public ConfigContainer getConfig(ConfigContainer defaultConfiguration) {
+		synchronized (configCacheLock) {
+			if (configCache == null) {
+				ZooKeeper zk = getZkConnectionSynchronously();
+				try {
+					if (zk.exists(getZookeeperConfigPath(), false) == null) {
+						//config file doesn't exist in zookeeper yet, write default config
+						String defaultConfigString = JSONUtils.toJSON(defaultConfiguration);
+						logger.log(Level.WARNING, "Zookeeper config not found - creating now with default: {0}", defaultConfigString);
+						initializeConfiguration(zk, defaultConfigString);
+					}
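+					// note: the watch registered by getData() is bound to this session, which is closed in the finally block below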
+					byte[] configBytes = zk.getData(getZookeeperConfigPath(), true, new Stat());
+					if (configBytes != null) {
+						String configString = new String(configBytes, "UTF-8");
+						configCache = configString;
+					} else {
+						// should never happen
+						throw new RuntimeException("Zookeeper configuration was not stored");
+					}
+				} catch (KeeperException e) {
+					throw new RuntimeException(MessageFormat.format("Zookeeper config could not be read, {0} Zookeeper exception occured!", e.code().name()), e);
+				} catch (InterruptedException e) {
+					throw new RuntimeException("Zookeeper config could not be read, the connection was interrupded", e);
+				} catch (IOException | JSONException e) {
+					throw new RuntimeException("Zookeeper config could not be read, the file could not be parsed correctly", e);
+				} finally {
+					if (zk != null) {
+						try {
+							zk.close();
+						} catch (InterruptedException e) {
+							e.printStackTrace();
+						}
+
+					}
+				}
+
+			}
+			try {
+				return JSONUtils.fromJSON(configCache, ConfigContainer.class);
+			} catch (JSONException e) {
+				throw new RuntimeException("Cached configuration was not valid", e);
+			}
+		}
+	}
+
+	private void initializeConfiguration(ZooKeeper zk, String config) {
+		try {
+			if (getZookeeperConfigPath().contains("/")) {
+				String[] nodes = getZookeeperConfigPath().split("/");
+				StringBuilder path = new StringBuilder();
+				for (String node : nodes) {
+					if (node.trim().equals("")) {
+						//ignore empty paths
+						continue;
+					}
+					path.append("/" + node);
+					try {
+						zk.create(path.toString(), new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+					} catch (NodeExistsException ex) {
+						//ignore if node already exists and continue with next node
+					}
+				}
+			}
+
+			//use version -1 to ignore versioning conflicts
+			try {
+				zk.setData(getZookeeperConfigPath(), config.toString().getBytes("UTF-8"), -1);
+			} catch (UnsupportedEncodingException e) {
+				// should not happen
+				throw new RuntimeException(e);
+			}
+		} catch (KeeperException e) {
+			throw new RuntimeException(MessageFormat.format("The zookeeper config could not be initialized, a Zookeeper exception of type {0} occured!", e.code().name()), e);
+		} catch (InterruptedException e) {
+			throw new RuntimeException("The zookeeper config could not be initialized, the connection got interrupted!", e);
+		}
+	}
+
+	private ZooKeeper getZkConnectionSynchronously() {
+		final CountDownLatch latch = new CountDownLatch(1);
+		logger.log(Level.FINE, "Trying to connect to zookeeper at {0}", zookeeperString);
+		ZooKeeper zk = null;
+		try {
+			int timeout = 5;
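+			// session timeout is given in seconds; the ZooKeeper constructor below expects milliseconds, hence timeout * 1000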
+			zk = new ZooKeeper(zookeeperString, timeout * 1000, new Watcher() {
+
+				@Override
+				public void process(WatchedEvent event) {
+					if (event.getState().equals(Watcher.Event.KeeperState.ConnectedReadOnly) || event.getState().equals(Watcher.Event.KeeperState.SyncConnected)) {
+						//count down latch, connected successfully to zk
+						latch.countDown();
+					}
+				}
+			});
+			//block until the watcher counts the latch down, waiting at most 5 * timeout seconds
+			latch.await(5 * timeout, TimeUnit.SECONDS);
+			if (latch.getCount() > 0) {
+				zk.close();
+				throw new RuntimeException("The zookeeper connection could not be retrieved on time!");
+			}
+			return zk;
+		} catch (IOException e1) {
+			throw new RuntimeException("The zookeeper connection could not be retrieved, the connection failed!", e1);
+		} catch (InterruptedException e) {
+			throw new RuntimeException("Zookeeper connection could not be retrieved, the thread was interrupted!", e);
+		}
+	}
+
+	public String getZookeeperConfigPath() {
+		return ZOOKEEPER_CONFIG_PATH;
+	}
+
+	@Override
+	public void onConfigChange(ConfigContainer container) {
+		synchronized (configCacheLock) {
+			try {
+				configCache = JSONUtils.toJSON(container);
+			} catch (JSONException e) {
+				throw new RuntimeException("Config could not be cloned!", e);
+			}
+		}
+	}
+
+	@Override
+	public void addPendingConfigChange(String changeId) {
+		synchronized (configCacheLock) {
+			pendingConfigChanges.add(changeId);
+		}
+	}
+
+	@Override
+	public void removePendingConfigChange(String changeId) {
+		synchronized (configCacheLock) {
+			pendingConfigChanges.remove(changeId);
+		}
+	}
+
+	@Override
+	public boolean isConfigChangePending(String changeId) {
+		synchronized (configCacheLock) {
+			return pendingConfigChanges.contains(changeId);
+		}
+	}
+}
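For orientation, a minimal usage sketch of the store above (illustrative only, not part of the patch: it assumes a reachable Zookeeper at the connect string supplied by the Environment, and the ConfigContainer setup mirrors the unit test further below):

    // Hypothetical usage sketch of ZookeeperConfigurationStorage.
    ZookeeperConfigurationStorage store = new ZookeeperConfigurationStorage();
    ConfigContainer defaults = new ConfigContainer();     // written to /odf/config if nothing is stored yet
    ConfigContainer current = store.getConfig(defaults);  // reads and caches the JSON from Zookeeper
    store.storeConfig(current);                           // serializes to JSON and writes it back
    store.clearCache();                                   // forces the next getConfig() to hit Zookeeper again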
diff --git a/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties b/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties
new file mode 100755
index 0000000..7234e9c
--- /dev/null
+++ b/odf/odf-store/src/main/resources/org/apache/atlas/odf/core/internal/zookeeper/test-embedded-zookeeper.properties
@@ -0,0 +1,34 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir=/tmp/odf-embedded-test-kafka/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
+
diff --git a/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..6124d42
--- /dev/null
+++ b/odf/odf-store/src/main/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,14 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+org.apache.atlas.odf.core.store.ODFConfigurationStorage=org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage
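The line above follows ODF's convention of mapping an interface name (the key) to its implementation class (the value). A minimal sketch of how such a mapping can be resolved reflectively; this is illustrative only, since the real lookup happens inside ODFInternalFactory, whose code is not part of this excerpt:

    // Hypothetical properties-based implementation lookup, assuming this file is on the classpath.
    import java.io.InputStream;
    import java.util.Properties;
    import org.apache.atlas.odf.core.store.ODFConfigurationStorage;

    public class ImplementationLookupSketch {
        public static ODFConfigurationStorage loadConfiguredStorage() throws Exception {
            Properties props = new Properties();
            try (InputStream in = ImplementationLookupSketch.class.getClassLoader()
                    .getResourceAsStream("org/apache/atlas/odf/odf-implementation.properties")) {
                props.load(in); // key = interface name, value = implementation class
            }
            String implName = props.getProperty("org.apache.atlas.odf.core.store.ODFConfigurationStorage");
            // instantiate the configured implementation and cast it to the interface
            return (ODFConfigurationStorage) Class.forName(implName).newInstance();
        }
    }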
diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
new file mode 100755
index 0000000..9650bd6
--- /dev/null
+++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/TestZookeeper.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store.zookeeper34.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.BindException;
+import java.net.DatagramSocket;
+import java.net.ServerSocket;
+import java.rmi.NotBoundException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooKeeper.States;
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+
+import org.apache.atlas.odf.core.Utils;
+
+public class TestZookeeper {
+
+	public TestZookeeper() {
+	}
+
+	public void start() {
+		try {
+			startZookeeper();
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new RuntimeException(e);
+		}
+	}
+
+	public static boolean deleteRecursive(File path) throws FileNotFoundException {
+		if (!path.exists()) {
+			throw new FileNotFoundException(path.getAbsolutePath());
+		}
+		boolean ret = true;
+		if (path.isDirectory()) {
+			for (File f : path.listFiles()) {
+				ret = ret && deleteRecursive(f);
+			}
+		}
+		return ret && path.delete();
+	}
+
+	static Thread zookeeperThread = null;
+	static Object lockObject = new Object();
+	static ZooKeeperServerMainWithShutdown zooKeeperServer = null;
+
+	boolean cleanData = true; // all data is cleaned at server start !!
+
+	Logger logger = Logger.getLogger(TestZookeeper.class.getName());
+
+	void log(String s) {
+		logger.info(s);
+	}
+
+	int zookeeperStartupTime = 10000;
+
+	static class ZooKeeperServerMainWithShutdown extends ZooKeeperServerMain {
+		public void shutdown() {
+			super.shutdown();
+		}
+	}
+
+	private void startZookeeper() throws Exception {
+		log("Starting zookeeper");
+
+		final Properties zkProps = Utils.readConfigProperties("org/apache/atlas/odf/core/messaging/kafka/test-embedded-zookeeper.properties");
+		log("zookeeper properties: " + zkProps);
+		if (cleanData) {
+			String dataDir = zkProps.getProperty("dataDir");
+			log("Removing all data from zookeeper data dir " + dataDir);
+			File dir = new File(dataDir);
+			if (dir.exists()) {
+				if (!deleteRecursive(dir)) {
+					throw new IOException("Could not delete directory " + dataDir);
+				}
+			}
+		}
+		final ZooKeeperServerMainWithShutdown zk = new ZooKeeperServerMainWithShutdown();
+		final ServerConfig serverConfig = new ServerConfig();
+		log("Loading zookeeper config...");
+		QuorumPeerConfig zkConfig = new QuorumPeerConfig();
+		zkConfig.parseProperties(zkProps);
+		serverConfig.readFrom(zkConfig);
+		final String zkPort = (String) zkProps.get("clientPort");
+
+		Runnable zookeeperStarter = new Runnable() {
+
+			@Override
+			public void run() {
+				try {
+					log("Now starting Zookeeper with API...");
+					zk.runFromConfig(serverConfig);
+				} catch (BindException ex) {
+					log("Embedded zookeeper could not be started, port is already in use. Trying to use external zookeeper");
+					ZooKeeper zK = null;
+					try {
+						zK = new ZooKeeper("localhost:" + zkPort, 5000, null);
+						if (zK.getState().equals(States.CONNECTED)) {
+							log("Using existing zookeeper running on port " + zkPort);
+							return;
+						} else {
+							throw new NotBoundException();
+						}
+					} catch (Exception zkEx) {
+						throw new RuntimeException("Could not connect to zookeeper on port " + zkPort + ". Please close all applications listening on this port.");
+					} finally {
+						if (zK != null) {
+							try {
+								zK.close();
+							} catch (InterruptedException e) {
+								logger.log(Level.WARNING, "An error occured closing the zk connection", e);
+							}
+						}
+					}
+				} catch (Exception e) {
+					e.printStackTrace();
+					throw new RuntimeException(e);
+				}
+
+			}
+		};
+
+		zookeeperThread = new Thread(zookeeperStarter);
+		zookeeperThread.setDaemon(true);
+		zookeeperThread.start();
+		log("Zookeeper start initiated, waiting 10s...");
+		Thread.sleep(10000);
+		zooKeeperServer = zk;
+		log("Zookeeper started");
+
+	}
+
+	public boolean isRunning() {
+		return zooKeeperServer != null;
+	}
+
+	boolean isPortAvailable(int port) {
+		ServerSocket ss = null;
+		DatagramSocket ds = null;
+		try {
+			ss = new ServerSocket(port);
+			ss.setReuseAddress(true);
+			ds = new DatagramSocket(port);
+			ds.setReuseAddress(true);
+			return true;
+		} catch (IOException e) {
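+			// binding one of the sockets failed, so the port is already in use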
+		} finally {
+			if (ds != null) {
+				ds.close();
+			}
+
+			if (ss != null) {
+				try {
+					ss.close();
+				} catch (IOException e) {
+				}
+			}
+		}
+
+		return false;
+	}
+}
diff --git a/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
new file mode 100755
index 0000000..1db55f2
--- /dev/null
+++ b/odf/odf-store/src/test/java/org/apache/atlas/odf/core/store/zookeeper34/test/ZookeeperConfigurationStorageTest.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.core.store.zookeeper34.test;
+
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.core.configuration.ConfigContainer;
+import org.apache.atlas.odf.core.store.zookeeper34.ZookeeperConfigurationStorage;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This test uses the real storage implementation; therefore, a running Zookeeper instance is required.
+ */
+public class ZookeeperConfigurationStorageTest {
+	@BeforeClass
+	public static void setup() {
+		new TestZookeeper().start();
+	}
+
+	@Test
+	public void testStoreInZookeeper() {
+		ZookeeperConfigurationStorage store = new ZookeeperConfigurationStorage() {
+
+			@Override
+			public String getZookeeperConfigPath() {
+				return "/odf/testconfig";
+			}
+			
+		};
+		ConfigContainer container = new ConfigContainer();
+		ODFSettings odfConfig = new ODFSettings();
+		String instanceId = "my_test_id";
+		odfConfig.setInstanceId(instanceId);
+		container.setOdf(odfConfig);
+		store.storeConfig(container);
+
+		ConfigContainer updatedContainer = store.getConfig(null);
+		Assert.assertEquals(instanceId, updatedContainer.getOdf().getInstanceId());
+		store.clearCache();
+		
+	}
+}
diff --git a/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
new file mode 100755
index 0000000..894ecb5
--- /dev/null
+++ b/odf/odf-store/src/test/resources/org/apache/atlas/odf/odf-implementation.properties
@@ -0,0 +1,16 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+## USE for TESTs only
+
+org.apache.atlas.odf.core.messaging.DiscoveryServiceQueueManager=org.apache.atlas.odf.core.test.messaging.MockQueueManager
diff --git a/odf/odf-test-env/.gitignore b/odf/odf-test-env/.gitignore
new file mode 100755
index 0000000..2045ff3
--- /dev/null
+++ b/odf/odf-test-env/.gitignore
@@ -0,0 +1,5 @@
+target
+.settings
+.classpath
+.project
+.DS_Store
diff --git a/odf/odf-test-env/pom.xml b/odf/odf-test-env/pom.xml
new file mode 100755
index 0000000..a37ed22
--- /dev/null
+++ b/odf/odf-test-env/pom.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+	xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-test-env</artifactId>
+	<name>odf-test-env</name>
+	<url>http://maven.apache.org</url>
+	<properties>
+		<!-- specify versions of components to be downloaded -->
+		<jetty.version>9.2.10.v20150310</jetty.version>
+		<kafka.version>0.10.0.0</kafka.version>
+		<scala.version>2.11</scala.version>
+		<spark.version>2.1.0</spark.version>
+		<jetty.port>58081</jetty.port>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-web</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>war</type>
+		</dependency>
+	</dependencies>
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<executions>
+					<execution>
+						<id>default-compile</id>
+						<phase>compile</phase>
+						<goals>
+							<goal>compile</goal>
+						</goals>
+						<configuration>
+							<skipMain>true</skipMain>
+							<!-- do not compile anything -->
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<skipTests>true</skipTests>
+					<!-- do not run tests -->
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<id>prepare-atlas</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="atlas-unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="atlas.version" value="${atlas.version}" />
+								<ant antfile="../odf-atlas/build_atlas.xml" target="prepare-atlas"></ant>
+							</target>
+						</configuration>
+					</execution>
+					<execution>
+						<id>prepare-components</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="jetty.version" value="${jetty.version}" />
+								<property name="jetty.port" value="${jetty.port}" />
+								<property name="kafka.version" value="${kafka.version}" />
+								<property name="scala.version" value="${scala.version}" />
+								<property name="project.basedir" value="${project.basedir}"/>
+								<ant antfile="prepare_components.xml" target="default"></ant>
+							</target>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-assembly-plugin</artifactId>
+				<configuration>
+					<descriptor>src/assembly/bin.xml</descriptor>
+					<finalName>odf-test-env-${project.version}</finalName>
+				</configuration>
+				<executions>
+					<execution>
+						<id>create-distribution</id>
+						<phase>package</phase>
+						<goals>
+							<goal>single</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<artifactId>maven-jar-plugin</artifactId>
+				<version>2.3.1</version>
+				<executions>
+					<execution>
+						<id>default-jar</id>
+						<!-- do not create default-jar -->
+						<phase>none</phase>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+</project>
diff --git a/odf/odf-test-env/prepare_components.xml b/odf/odf-test-env/prepare_components.xml
new file mode 100755
index 0000000..a6a733b
--- /dev/null
+++ b/odf/odf-test-env/prepare_components.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="prepare_components">
+
+	<!-- Property is provided by pom.xml -->
+	<!-- <property name="jetty.version" value="" /> -->
+	<!-- <property name="kafka.version" value="" /> -->
+	<!-- <property name="scala.version" value="" /> -->
+
+	<dirname property="script.basedir" file="${ant.file.prepare_components}" />
+
+	<property name="jetty-dir" value="jetty-distribution-${jetty.version}" />
+	<property name="kafka-dir" value="kafka_${scala.version}-${kafka.version}" />
+	<property name="spark-dir" value="spark_${spark.version}" />
+
+	<property name="jetty-archive" value="/tmp/${jetty-dir}.zip" />
+	<property name="kafka-archive" value="/tmp/${kafka-dir}.tar.gz" />
+	<property name="spark-archive" value="/tmp/${spark-dir}.tar.gz" />
+
+	<condition property="jetty-zip-not-found">
+		<not>
+			<available file="${jetty-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="kafka-zip-not-found">
+		<not>
+			<available file="${kafka-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="spark-zip-not-found">
+		<not>
+			<available file="${spark-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="jetty-unpacked">
+	   <available file="${unpack-dir}/${jetty-dir}/bin/jetty.sh"/>
+    </condition>
+
+	<condition property="kafka-unpacked">
+	   <available file="${unpack-dir}/${kafka-dir}/bin/kafka-server-start.sh"/>
+    </condition>
+
+	<condition property="spark-unpacked">
+	   <available file="${unpack-dir}/${spark-dir}/sbin/start-master.sh"/>
+    </condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-jetty" if="jetty-zip-not-found">
+		<echo message="Downloading Jetty. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="https://repo1.maven.org/maven2/org/eclipse/jetty/jetty-distribution/${jetty.version}/jetty-distribution-${jetty.version}.zip" dest="${jetty-archive}" />
+		<echo message="Jetty downloaded" />
+	</target>
+
+	<target name="download-kafka" if="kafka-zip-not-found">
+		<echo message="Downloading Kafka. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="http://ftp-stud.hs-esslingen.de/pub/Mirrors/ftp.apache.org/dist/kafka/${kafka.version}/kafka_${scala.version}-${kafka.version}.tgz" dest="${kafka-archive}" />
+		<echo message="Kafka downloaded" />
+	</target>
+
+	<target name="download-spark" if="spark-zip-not-found">
+		<echo message="Downloading Spark. Depending on your network this can last up to 20 (yes, twenty) minutes." />
+		<get verbose="true" src="http://d3kbcqa49mib13.cloudfront.net/spark-${spark.version}-bin-hadoop2.7.tgz" dest="${spark-archive}" />
+		<echo message="Spark downloaded" />
+	</target>
+
+	<target name="unzip-jetty" unless="jetty-unpacked">
+		<antcall target="download-jetty"/>
+		<echo message="Installing Jetty test instance" />
+		<echo message="Deleting ${unpack-dir}/${jetty-dir}" />
+		<delete dir="${unpack-dir}/${jetty-dir}" />
+		<echo message="deleted" />
+		<unzip src="${jetty-archive}" dest="${unpack-dir}" />
+		<!-- Create Jetty base folder -->
+		<mkdir dir="${unpack-dir}/odfjettybase"/>
+		<!-- Generate Jetty base configuration files -->
+		<java dir="${unpack-dir}/odfjettybase" classname="org.eclipse.jetty.start.Main" fork="true">
+			<arg value="--add-to-startd=https,ssl,deploy,plus"/>
+			<classpath>
+				<pathelement location="${unpack-dir}/${jetty-dir}/start.jar"/>
+				<pathelement path="${unpack-dir}/${jetty-dir}"/>
+				<pathelement path="${java.class.path}"/>
+			</classpath>
+			<jvmarg value="-Djetty.home=${unpack-dir}/${jetty-dir}"/>
+			<jvmarg value="-Djetty.base=${unpack-dir}/odfjettybase"/>
+		</java>
+		<!-- Update Jetty port number -->
+		<replace file="${unpack-dir}/odfjettybase/start.d/https.ini" token="https.port=8443" value="https.port=${jetty.port}"/>
+	</target>
+
+	<target name="unzip-kafka" unless="kafka-unpacked">
+		<antcall target="download-kafka"/>
+		<echo message="Installing Kafka test instance" />
+		<echo message="Deleting ${unpack-dir}/${kafka-dir}" />
+		<delete dir="${unpack-dir}/${kafka-dir}" />
+		<echo message="deleted" />
+	    <untar src="${kafka-archive}" dest="${unpack-dir}" compression="gzip" />
+
+		<!-- remove -loggc command line argument in scripts because they don't exist in the IBM JVM -->
+		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/kafka-server-start.sh" token="-loggc" value=""/>
+		<replace file="${unpack-dir}/kafka_${scala.version}-${kafka.version}/bin/zookeeper-server-start.sh" token="-loggc" value=""/>
+	</target>
+
+	<target name="unzip-spark" unless="spark-unpacked">
+		<antcall target="download-spark"/>
+		<echo message="Installing Spark test instance" />
+		<echo message="Deleting ${unpack-dir}/${spark-dir}" />
+		<delete dir="${unpack-dir}/${spark-dir}" />
+		<echo message="deleted" />
+	    <untar src="${spark-archive}" dest="${unpack-dir}" compression="gzip" />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="enable-jetty-basic-authentication">
+		<echo message="Enabling jetty basic authentication..." />
+		<echo message="Updating jetty.xml file..." />
+		<replace file="${unpack-dir}/${jetty-dir}/etc/jetty.xml">
+			<!-- See corresponding config in web.xml file of SDP webapp -->
+			<replacetoken><![CDATA[</Configure>]]></replacetoken>
+			<replacevalue>
+				<![CDATA[
+	<Call name="addBean">
+		<Arg>
+			<New class="org.eclipse.jetty.security.HashLoginService">
+				<Set name="name">ODF Realm</Set>
+				<Set name="config"><SystemProperty name="jetty.home" default="."/>/etc/realm.properties</Set>
+			</New>
+		</Arg>
+	</Call>
+</Configure>
+				]]>
+			</replacevalue>
+		</replace>
+		<echo message="Copying credentials file..." />
+		<copy file="${script.basedir}/../jettyconfig/realm.properties" tofile="${unpack-dir}/${jetty-dir}/etc/realm.properties" overwrite="true"/>
+		<echo message="Jetty basic authentication has been enabled." />
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="default">
+		<mkdir dir="${unpack-dir}"/>
+		<antcall target="unzip-jetty"/>
+		<antcall target="enable-jetty-basic-authentication"/>
+		<antcall target="unzip-kafka"/>
+		<antcall target="unzip-spark"/>
+	</target>
+
+</project>
diff --git a/odf/odf-test-env/src/assembly/bin.xml b/odf/odf-test-env/src/assembly/bin.xml
new file mode 100755
index 0000000..b5731a7
--- /dev/null
+++ b/odf/odf-test-env/src/assembly/bin.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<assembly
+	xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
+	<id>bin</id>
+	<formats>
+		<format>zip</format>
+	</formats>
+	<fileSets>
+		<fileSet>
+			<outputDirectory>/</outputDirectory>
+			<directory>target/downloads</directory>
+			<excludes>
+				<exclude>*.zip</exclude>
+				<exclude>*.gz</exclude>
+				<exclude>**/zookeeper.properties</exclude>
+				<exclude>**/server.properties</exclude>
+			</excludes>
+			<fileMode>0755</fileMode>
+		</fileSet>
+		<fileSet>
+			<outputDirectory>/</outputDirectory>
+			<directory>src/main/scripts</directory>
+			<fileMode>0755</fileMode>
+			<excludes>
+			   <exclude>**/jenkins-*.sh</exclude>
+			</excludes>
+		</fileSet>
+		<fileSet>
+			<outputDirectory>/kafka_${scala.version}-${kafka.version}/config</outputDirectory>
+			<directory>src/main/config</directory>
+			<includes>
+				<include>*.properties</include>
+			</includes>
+		</fileSet>
+		<fileSet>
+			<directory>../odf-doc/target/site</directory>
+			<outputDirectory>/odf-documentation</outputDirectory>
+		</fileSet>
+	</fileSets>
+	<files>
+		<file>
+			<source>../odf-doc/src/site/markdown/test-env.md</source>
+			<outputDirectory>/</outputDirectory>
+			<destName>README.md</destName>
+		</file>
+	</files>
+	<dependencySets>
+		<dependencySet>
+			<outputDirectory>/odfjettybase/webapps</outputDirectory>
+			<includes>
+				<include>*:war:*</include>
+			</includes>
+			<excludes>
+				<exclude>*:jar:*</exclude>
+			</excludes>
+		</dependencySet>
+	</dependencySets>
+</assembly>
diff --git a/odf/odf-test-env/src/main/config/server.properties b/odf/odf-test-env/src/main/config/server.properties
new file mode 100755
index 0000000..1f2a406
--- /dev/null
+++ b/odf/odf-test-env/src/main/config/server.properties
@@ -0,0 +1,134 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port=59092
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+#host.name=localhost
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=3
+ 
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/odftestenv-kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk. 
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according 
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:52181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000
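Note that this broker listens on 59092 instead of Kafka's default 9092, with Zookeeper on 52181. A minimal smoke-test sketch against this configuration; it assumes the kafka-clients jar on the classpath, and the topic name is made up for illustration:

    // Hypothetical smoke test -- not part of the patch.
    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class TestEnvKafkaSmokeTest {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:59092"); // matches 'port' above
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            try (KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props)) {
                // get() blocks until the broker acknowledges the write
                producer.send(new ProducerRecord<String, String>("odf-smoke-test", "hello")).get();
            }
        }
    }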
diff --git a/odf/odf-test-env/src/main/config/zookeeper.properties b/odf/odf-test-env/src/main/config/zookeeper.properties
new file mode 100755
index 0000000..5f4d7e0
--- /dev/null
+++ b/odf/odf-test-env/src/main/config/zookeeper.properties
@@ -0,0 +1,33 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# 
+#    http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir=/tmp/odftestenv-zookeeper
+# the port at which the clients will connect
+clientPort=52181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.bat b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
new file mode 100755
index 0000000..84c2449
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/clean_atlas.bat
@@ -0,0 +1,22 @@
+REM
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
+
+echo Delete atlas data
+del /F /S /Q "%ATLAS_HOME%\data"
diff --git a/odf/odf-test-env/src/main/scripts/clean_atlas.sh b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
new file mode 100755
index 0000000..4eb3b1d
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/clean_atlas.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+
+export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+
+echo Delete atlas data
+rm -rf $ATLAS_HOME/data
diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
new file mode 100755
index 0000000..92561ad
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.bat
@@ -0,0 +1,24 @@
+REM 
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+REM set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
+set ODF_GIT_DIR=c:\git\open-discovery-framework
+
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+
+copy /Y %ODF_GIT_DIR%\odf-web\target\odf-web-1.2.0-SNAPSHOT.war %TESTENVDIR%\odfjettybase\webapps
diff --git a/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
new file mode 100755
index 0000000..732515a
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/deploy-odf-war.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set ODF_GIT_DIR to the root project of your ODF Git project (i.e. where the top pom.xml resides)
+export ODF_GIT_DIR=~/git/open-discovery-framework
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+cp $ODF_GIT_DIR/odf-web/target/odf-web-1.2.0-SNAPSHOT.war $BASEDIR/odfjettybase/webapps
diff --git a/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
new file mode 100755
index 0000000..e3f6c52
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/download-install-odf-testenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Script to download, start, and configure the ODF test environment.
+# JenkinsBuildNumber refers to the build number of the job Open-Discovery-Framework, see here:
+# https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/Open-Discovery-Framework
+#
+# Usage: download-install-odf-testenv.sh [<JenkinsBuildNumber> <Directory> ]
+#        Default values:
+#             <JenkinsBuildNumber>: lastSuccessfulBuild
+#             <Directory>: ~/odf-test-env
+#
+
+JENKINSBUILDNUMBER=$1
+if [ -z "$JENKINSBUILDNUMBER" ]; then
+   JENKINSBUILDNUMBER=lastSuccessfulBuild
+   echo Jenkins build number not provided, using default $JENKINSBUILDNUMBER
+fi
+
+TESTENVDIR=$2
+if [ -z "$TESTENVDIR" ]; then
+   TESTENVDIR=~/odf-test-env
+   echo Target directory not provided, using default $TESTENVDIR
+fi
+
+# hidden third parameter taking the jenkins job name
+JENKINSJOB=$3
+if [ -z "$JENKINSJOB" ]; then
+   JENKINSJOB=Open-Discovery-Framework
+   echo Jenkins job not provided, using default $JENKINSJOB
+fi
+
+echo Downloading test env to directory $TESTENVDIR, Jenkins build number: $JENKINSBUILDNUMBER
+
+
+TESTENVVERSION=1.2.0-SNAPSHOT
+TESTENVZIP=/tmp/odf-test-env.zip
+FULLHOSTNAME=`hostname -f`
+
+
+echo Downloading ODF test env
+curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$JENKINSJOB/$JENKINSBUILDNUMBER/artifact/odf-test-env/target/odf-test-env-$TESTENVVERSION-bin.zip --output $TESTENVZIP
+
+echo Stopping test env if it exists...
+$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh stop
+sleep 1
+echo Test env stopped
+
+echo Removing existing test env directory...
+rm -rf $TESTENVDIR/odf-test-env-$TESTENVVERSION
+echo Existing test env directory removed
+
+echo Unpacking $TESTENVZIP to $TESTENVDIR
+mkdir -p $TESTENVDIR
+unzip -q $TESTENVZIP -d $TESTENVDIR
+
+$TESTENVDIR/odf-test-env-$TESTENVVERSION/odftestenv.sh cleanall
+
+echo ODF test env installed and started
+echo "Point your browser to https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT to check it out"
diff --git a/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
new file mode 100755
index 0000000..bdb1428
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is the script used in the job definition of our Jenkins job Manage-Install-ODF-Testenv
+# The original can be found in git: odf-test-env/src/main/scripts/jenkins-manage-testenv.sh
+#
+# The Jenkins job should have the following parameters:
+#
+# 1. nodelabel: Label parameter. Default: odftestenv
+#
+# 2. action: Choice parameter with these choices: start, stop, cleanall, cleanconfig, cleanmetadata, install
+# Action description:
+#Available actions are:
+#<ul>
+#  <li>install: Remove the existing and install a new test environment build.
+#    Installs the most recent successful build by default. To change which build is used
+#    set the parameters <em>buildnumber</em> and <em>job</em> accordingly.</li>
+#  <li>start: (re)start the test environment</li>
+#  <li>stop:  stop the test environment</li>
+#  <li>cleanall: (re)starts with clean configuration and clean metadata</li>
+#  <li>cleanconfig   (re)starts with clean configuration</li>
+#  <li>cleanmetadata (re)starts with clean metadata</li>
+#</ul>
+#
+# 3. jenkinsjob: Choice parameter with choices: Shared-Discovery-Platform, Shared-Discovery-Platform-Parameters
+#
+# 4. buildnumber: String parameter with default: lastSuccessfulBuild
+#
+
+echo Managing ODF test environment with parameters: action = $action, buildnumber = $buildnumber, jenkinsjob = $jenkinsjob
+
+if [ "$action" = "install" ]; then
+  ODFTESTENVTARGETDIR=/home/atlasadmin/odf-test-env
+  OUTPUTFILE=/tmp/download-install-odf-testenv.sh
+
+  if [ "$buildnumber" = "" ]; then
+    buildnumber=lastSuccessfulBuild
+  fi
+
+  if [ "$jenkinsjob" = "" ]; then
+    jenkinsjob=Shared-Discovery-Platform
+  fi
+
+  echo Downloading build number $buildnumber
+  curl https://shared-discovery-platform-jenkins.swg-devops.com:8443/job/$jenkinsjob/$buildnumber/artifact/odf-test-env/src/main/scripts/download-install-odf-testenv.sh --output $OUTPUTFILE
+
+  echo Running installer script on directory $ODFTESTENVTARGETDIR with build number $buildnumber
+  chmod 755 $OUTPUTFILE
+  export BUILD_ID=dontletjenkinskillme
+  echo Running command $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
+  $OUTPUTFILE $buildnumber $ODFTESTENVTARGETDIR $jenkinsjob
+else
+  TESTENVDIR=~/odf-test-env/odf-test-env-1.2.0-SNAPSHOT
+  export BUILD_ID=dontletjenkinskillme
+
+  $TESTENVDIR/odftestenv.sh $action
+fi
diff --git a/odf/odf-test-env/src/main/scripts/odftestenv.sh b/odf/odf-test-env/src/main/scripts/odftestenv.sh
new file mode 100755
index 0000000..94d08f3
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/odftestenv.sh
@@ -0,0 +1,232 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You should not have to change anything below this line ;-)
+###############################################################
+
+#############################################
+## Check that java and python are available
+
+
+if [ "x$JAVA_HOME" == "x" ]; then
+  echo "JAVA_HOME is not set, using standard java on path"
+  JAVAEXE=$(which java)
+else
+  echo "JAVA_HOME is set to $JAVA_HOME"
+  JAVAEXE=$JAVA_HOME/bin/java
+fi
+
+if [ ! -x $JAVAEXE ]; then
+   echo "Java executable $JAVAEXE could not be found. Set JAVA_HOME accordingly or make sure that java is in your path".
+   exit 1
+fi
+
+echo "Using java: $JAVAEXE"
+
+
+PYTHON27EXE=python
+PYTHONVERSION=`$PYTHON27EXE --version 2>&1`
+if [[ ! $PYTHONVERSION == *2.7.* ]]; then
+   echo "Warning: Python command is not version 2.7. Starting / stopping Atlas might not work properly"
+fi
+
+
+###############################################
+## Set some variables
+
+BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+FULLHOSTNAME=`hostname -f`
+
+ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+ATLAS_PORT=21453
+ATLAS_URL=https://localhost:$ATLAS_PORT
+ATLAS_USER=admin
+ATLAS_PASSWORD=UR0+HOiApXG9B8SNpKN5ww==
+
+ZK_DATADIR=/tmp/odftestenv-zookeeper
+KAFKA_DATADIR=/tmp/odftestenv-kafka-logs
+
+# export KAFKA_OPTS so that it is picked up by the Kafka and Zookeeper start scripts. This can be used as a marker to search for those processes.
+KILLMARKER=thisisanodftestenvprocess
+export KAFKA_OPTS="-D$KILLMARKER=true"
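+# (stopTestEnv below relies on this marker: ps aux | grep $KILLMARKER)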
+KAFKA_HOME=$BASEDIR/kafka_2.11-0.10.0.0
+SPARK_HOME=$BASEDIR/spark-2.1.0-bin-hadoop2.7
+
+JETTY_BASE=$BASEDIR/odfjettybase
+JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
+
+##########################################
+## Copy required files
+
+if [ "$(uname)" == "Darwin" ]; then
+	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
+else
+	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
+fi
+
+##########################################
+## Functions
+
+function waitSeconds {
+   echo "     Waiting for $1 seconds..."
+   sleep $1
+}
+
+function cleanMetadata {
+	echo Removing Atlas data...
+	rm -rf $ATLAS_HOME/data
+	rm -rf $ATLAS_HOME/logs
+	echo Atlas data removed
+}
+
+function cleanConfig {
+	echo Removing Zookeeper and Kafka data...
+	rm -rf $KAFKA_DATADIR
+	rm -rf $ZK_DATADIR
+	echo Zookeeper and Kafka data removed.
+}
+
+function reconfigureODF {
+	echo Configuring ODF...
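+	# Point ODF at the local Spark master via the settings REST API
+	# (SPARK_MASTER is set by startTestEnv before this function is called)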
+    JSON='{ "sparkConfig": { "clusterMasterUrl": "'$SPARK_MASTER'" } }'
+    echo Updating config to $JSON
+    curl -H "Content-Type: application/json" -X PUT -d "$JSON" -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/settings
+    echo ODF configured.
+}
+
+function healthCheck {
+    echo Running ODF health check
+    curl -X GET -k -u sdp:admin4sdp https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/engine/health
+    echo Health check finished
+}
+
+function startTestEnv {
+   echo Starting ODF test env
+   if [ -f "$ZKDATADIR" ]; then
+      echo zookeeper data exists
+   fi
+
+   echo "Starting Zookeeper"
+   nohup $KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties &> $BASEDIR/nohupzookeeper.out &
+   waitSeconds 5
+   echo "Starting Kafka"
+   nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &> $BASEDIR/nohupkafka.out &
+   waitSeconds 5
+   if [[ $(unzip -v $JETTY_BASE/webapps/odf-web-1.2.0-SNAPSHOT.war | grep odf-atlas-) ]]; then
+     echo "Starting Atlas"
+     nohup $PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port $ATLAS_PORT &> $BASEDIR/nohupatlas.out &
+     waitSeconds 30
+   else
+       echo "Do not start Atlas because ODF was built without it."
+   fi
+   echo "Starting Spark master"
+   cd $SPARK_HOME
+   nohup sbin/start-master.sh &> $BASEDIR/nohupspark.out &
+   waitSeconds 5
+   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
+   echo "Spark master URL: $SPARK_MASTER"
+   echo "Starting Spark slave"
+   nohup sbin/start-slave.sh $SPARK_MASTER &> $BASEDIR/nohupspark.out &
+   waitSeconds 5
+   echo "Starting ODF on Jetty"
+   cd $JETTY_BASE
+   nohup $JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=$ATLAS_URL -Datlas.user=$ATLAS_USER -Datlas.password=$ATLAS_PASSWORD -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP &> $BASEDIR/nohupjetty.out &
+   waitSeconds 10
+
+   healthCheck
+   reconfigureODF
+
+   echo "ODF test env started on https://$FULLHOSTNAME:58081/odf-web-1.2.0-SNAPSHOT"
+}
+
+function stopTestEnv {
+   echo Stopping ODF test env ...
+   echo Stopping kafka and zookeeper...
+   PROCESSNUM=`ps aux | grep $KILLMARKER | grep -v grep | wc -l | awk '{print $1}'`
+   if [ $PROCESSNUM -gt 0 ]; then
+      echo Killing $PROCESSNUM Kafka / ZK processes
+      kill -9 $(ps aux | grep $KILLMARKER | grep -v grep | awk '{print $2}')
+   else
+      echo No Kafka / Zookeeper processes found
+   fi
+   waitSeconds 3
+   echo Kafka and Zookeeper stopped
+   echo Stopping Atlas...
+   $PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
+   waitSeconds 5
+   echo Atlas stopped
+   echo Stopping Spark...
+   cd $SPARK_HOME
+   SPARK_MASTER=$(curl http://localhost:8080 | awk '/ Spark Master at/{print $NF}')
+   sbin/stop-slave.sh $SPARK_MASTER
+   sbin/stop-master.sh
+   waitSeconds 5
+   echo Spark stopped
+   echo Stopping Jetty...
+   cd $JETTY_BASE
+   $JAVAEXE -jar $JETTY_HOME/start.jar STOP.PORT=53000 STOP.KEY=STOP --stop
+   waitSeconds 5
+   echo Jetty stopped
+   echo ODF test env stopped
+}
+
+
+function usageAndExit {
+  echo "Usage: $0 start|stop|cleanconfig|cleanmetadata|cleanall"
+  echo "Manage the ODF test environment"
+  echo "Options:"
+  echo "         start         (re)start"
+  echo "         stop          stop"
+  echo "         cleanall      (re)starts with clean configuration and clean metadata"
+  echo "         cleanconfig   (re)starts with clean configuration"
+  echo "         cleanmetadata (re)starts with clean metadata"
+  exit 1;
+}
+
+###############################################
+## main script
+
+if [ -z "$1" ]; then
+   usageAndExit
+elif [ "$1" = "start" ]; then
+   echo "(Re) starting test env..."
+   stopTestEnv
+   echo "-------------------------------------"
+   startTestEnv
+   echo "Test env restarted"
+elif [ "$1" = "stop" ]; then
+   stopTestEnv
+elif [ "$1" = "cleanconfig" ]; then
+   echo "(Re) starting test env with clean configuration..."
+   stopTestEnv
+   cleanConfig
+   startTestEnv
+   echo "(Re)started test env with clean configuration"
+elif [ "$1" = "cleanmetadata" ]; then
+   echo "(Re) starting test env with clean metadata..."
+   stopTestEnv
+   cleanMetadata
+   startTestEnv
+   echo "(Re)started test env with clean metadata"
+elif [ "$1" = "cleanall" ]; then
+   echo "(Re) starting test env with clean configuration and metadata..."
+   stopTestEnv
+   cleanConfig
+   cleanMetadata
+   startTestEnv
+   echo "(Re)started test env with clean configuration and metadata"
+else
+   usageAndExit
+fi
diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
new file mode 100755
index 0000000..db442e0
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.bat
@@ -0,0 +1,57 @@
+REM
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM   http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+
+set JAVAEXE=%JAVA_HOME%\bin\java.exe
+set PYTHON27EXE=python
+
+
+REM you should not have to change anything below this line ;-)
+
+set TESTENVDIR=%~dp0
+set JETTY_HOME=%TESTENVDIR%jetty-distribution-9.2.10.v20150310
+set KAFKA_PACKAGE_DIR=%TESTENVDIR%kafka_2.11-0.10.0.0
+set ATLAS_HOME=%TESTENVDIR%apache-atlas-0.7-incubating-release
+
+echo Delete logs
+del /F /S /Q "C:\tmp\odftestenv-kafka-logs"
+del /F /S /Q "C:\tmp\odftestenv-zookeeper"
+
+echo Copy required files
+xcopy %ATLAS_HOME%\conf\atlas-application.properties_windows %ATLAS_HOME%\conf\atlas-application.properties /Y
+
+REM Workaround for issue #94 (Location of keystore files is hardcoded in Atlas config)
+if not exist "C:\tmp\apache-atlas-0.7-incubating-release\conf" (mkdir "C:\tmp\apache-atlas-0.7-incubating-release\conf")
+xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jceks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y
+xcopy %ATLAS_HOME%\conf\keystore_ibmjdk.jks C:\tmp\apache-atlas-0.7-incubating-release\conf /Y
+
+echo Start zookeeper:
+start "Zookeeper" %KAFKA_PACKAGE_DIR%\bin\windows\zookeeper-server-start.bat %KAFKA_PACKAGE_DIR%\config\zookeeper.properties
+
+timeout 5 /NOBREAK
+
+echo Start kafka:
+start "Kafka" %KAFKA_PACKAGE_DIR%\bin\windows\kafka-server-start.bat %KAFKA_PACKAGE_DIR%\config\server.properties
+
+timeout 5 /NOBREAK
+
+echo Stop and restart Atlas
+start "Stop Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_stop.py
+start "Start Atlas" %PYTHON27EXE% %ATLAS_HOME%\bin\atlas_start.py -port 21443
+
+echo Start jetty
+set JETTY_BASE=%TESTENVDIR%odfjettybase
+rem set JETTY_BASE=%TESTENVDIR%base2
+cd %JETTY_BASE%
+start "Jetty" %JAVAEXE% -Dodf.zookeeper.connect=localhost:52181 -Datlas.url=https://localhost:21443 -Datlas.user=admin -Datlas.password=UR0+HOiApXG9B8SNpKN5ww== -Dodf.logspec=ALL,/tmp/odf-test-env-trace.log -jar %JETTY_HOME%\start.jar
diff --git a/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
new file mode 100755
index 0000000..664b5a9
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/start-odf-testenv.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+export JAVAEXE=java
+export PYTHON27EXE=python
+
+# You should not have to change anything below this line ;-)
+export BASEDIR="$( cd "$(dirname "$0")" ; pwd -P )"
+
+export JETTY_HOME=$BASEDIR/jetty-distribution-9.2.10.v20150310
+export KAFKA_PACKAGE_DIR=$BASEDIR/kafka_2.11-0.10.0.0
+export ATLAS_HOME=$BASEDIR/apache-atlas-0.7-incubating-release
+
+echo Delete logs
+rm -rf /tmp/odftestenv-kafka-logs
+rm -rf /tmp/odftestenv-zookeeper
+
+echo Copy required files
+if [ "$(uname)" == "Darwin" ]; then
+	cp $ATLAS_HOME/conf/atlas-application.properties_mac $ATLAS_HOME/conf/atlas-application.properties
+else
+	cp $ATLAS_HOME/conf/atlas-application.properties_linux $ATLAS_HOME/conf/atlas-application.properties
+fi
+
+echo Start zookeeper:
+$KAFKA_PACKAGE_DIR/bin/zookeeper-server-start.sh $KAFKA_PACKAGE_DIR/config/zookeeper.properties &
+
+sleep 5
+
+echo Start kafka:
+$KAFKA_PACKAGE_DIR/bin/kafka-server-start.sh $KAFKA_PACKAGE_DIR/config/server.properties &
+
+sleep 5
+
+echo Stop and restart Atlas
+$PYTHON27EXE $ATLAS_HOME/bin/atlas_stop.py
+$PYTHON27EXE $ATLAS_HOME/bin/atlas_start.py -port 21443
+
+echo Start jetty
+export JETTY_BASE=$BASEDIR/odfjettybase
+cd $JETTY_BASE
+$JAVAEXE -Dodf.zookeeper.connect=localhost:52181 -Dorg.eclipse.jetty.servlet.LEVEL=ALL -jar $JETTY_HOME/start.jar &
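+
+# Use stop-odf-testenv.sh to shut the test environment down again.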
diff --git a/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
new file mode 100755
index 0000000..6f974b9
--- /dev/null
+++ b/odf/odf-test-env/src/main/scripts/stop-odf-testenv.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+echo Stopping all processes of the odf-test-env...
+kill -9 $(ps aux | grep 'odf-test-env' | grep -v 'download-install' | grep -v 'stop-odf-testenv' | awk '{print $2}')
diff --git a/odf/odf-web/.gitignore b/odf/odf-web/.gitignore
new file mode 100755
index 0000000..1d6f10b
--- /dev/null
+++ b/odf/odf-web/.gitignore
@@ -0,0 +1,25 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+.settings
+target
+.classpath
+.project
+.factorypath
+.externalToolBuilders
+package-lock.json
+build
+build/**
+node_modules
+node_modules/**
+.DS_Store
diff --git a/odf/odf-web/download_swagger-ui.xml b/odf/odf-web/download_swagger-ui.xml
new file mode 100755
index 0000000..74ef82d
--- /dev/null
+++ b/odf/odf-web/download_swagger-ui.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project name="odf-download-swagger-ui">
+
+	<property name="swagger-dir" value="swagger-ui-${swagger.version}" />
+	<!-- download swagger ui directly from the web:
+	<property name="swagger-download" value="https://github.com/swagger-api/swagger-ui/archive/v${swagger.version}.tar.gz" />
+	<property name="swagger-archive" value="${unpack-dir}/${swagger-dir}.tar.gz" />
+	-->
+	<!-- download swagger ui from box: -->
+	<property name="swagger-download" value="https://ibm.box.com/shared/static/13cb0nobufykaxvrnezjf2fbtf0hpfn7.gz" />
+	<property name="swagger-archive" value="${unpack-dir}/swagger-ui-2.1.4.tar.gz" />
+
+	<condition property="swagger-zip-not-found">
+		<not>
+			<available file="${swagger-archive}">
+			</available>
+		</not>
+	</condition>
+
+	<condition property="swagger-unpacked">
+	   <available file="${unpack-dir}/${swagger-dir}/dist" type="dir" />
+    </condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-swagger-ui" if="swagger-zip-not-found">
+		<echo message="Downloading Swagger..." />
+		<get verbose="true" src="${swagger-download}" dest="${swagger-archive}" />
+		<echo message="Swagger downloaded" />
+	</target>
+
+	<target name="unzip-swagger" unless="swagger-unpacked">
+		<antcall target="download-swagger-ui"/>
+		<echo message="Installing Swagger" />
+		<echo message="Deleting ${unpack-dir}/${swagger-dir}" />
+		<delete dir="${unpack-dir}/${swagger-dir}" />
+		<echo message="Deleted" />
+		<untar src="${swagger-archive}" dest="${unpack-dir}" compression="gzip" />
+		<!-- <unzip src="${swagger-archive}" dest="${unpack-dir}" /> -->
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="default">
+		<mkdir dir="${unpack-dir}"/>
+		<antcall target="unzip-swagger"/>
+	</target>
+
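+	<!-- Illustrative note: this file is invoked from the odf-web pom via the
+	     maven-antrun-plugin, which passes unpack-dir
+	     (${project.build.directory}/downloads) and swagger.version (2.1.4)
+	     as properties. -->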
+</project>
diff --git a/odf/odf-web/package.json b/odf/odf-web/package.json
new file mode 100755
index 0000000..fc170b4
--- /dev/null
+++ b/odf/odf-web/package.json
@@ -0,0 +1,31 @@
+{
+  "name": "odf-web",
+  "version": "1.2.0-SNAPSHOT",
+  "main": "index.html",
+  "dependencies": {
+    "bootstrap": "^3.3.6",
+    "d3": "^3.5.12",
+    "react": "^0.14.6",
+    "jquery": "^2.2.0",
+    "react-addons-linked-state-mixin": "^0.14.6",
+    "react-bootstrap": "^0.28.2",
+    "react-dom": "^0.14.6",
+    "react-d3-components": "^0.6.1",
+    "bootstrap-material-design": "^0.5.7",
+    "roboto-font": "^0.1.0"
+  },
+  "devDependencies": {
+    "webpack": "^1.12.11",
+    "imports-loader": "^0.6.5",
+    "babel-core": "^6.4.0",
+    "babel-preset-es2015": "^6.3.13",
+    "babel-loader": "^6.2.1",
+    "babel-preset-react": "^6.3.13",
+    "file-loader": "^0.11.2",
+    "url-loader": "^0.5.7",
+    "css-loader": "^0.23.1",
+    "style-loader": "^0.13.0"
+  },
+  "author": "IBM",
+  "license": "ISC"
+}
diff --git a/odf/odf-web/pom.xml b/odf/odf-web/pom.xml
new file mode 100755
index 0000000..d44edcaf
--- /dev/null
+++ b/odf/odf-web/pom.xml
@@ -0,0 +1,441 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.apache.atlas.odf</groupId>
+		<artifactId>odf</artifactId>
+		<version>1.2.0-SNAPSHOT</version>
+	</parent>
+	<artifactId>odf-web</artifactId>
+	<packaging>war</packaging>
+	<properties>
+		<!-- specify versions of components to be downloaded -->
+		<swagger.version>2.1.4</swagger.version>
+		<swagger.base.path>/${project.artifactId}-${project.version}/odf/api/v1</swagger.base.path>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-api</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.glassfish.jersey.core</groupId>
+			<artifactId>jersey-server</artifactId>
+			<version>2.22.2</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>javax.ws.rs</groupId>
+			<artifactId>jsr311-api</artifactId>
+			<version>1.1.1</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>javax.servlet</groupId>
+			<artifactId>servlet-api</artifactId>
+			<version>2.5</version>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<artifactId>swagger-jaxrs</artifactId>
+			<version>1.5.9</version>
+			<groupId>io.swagger</groupId>
+			<scope>compile</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-doc</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>war</type>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-spark</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+			<exclusions>
+				<!-- Exclude this dependency to avoid the following error when running the jetty-maven-plugin:
+				 "A required class was missing while executing org.eclipse.jetty:jetty-maven-plugin:9.2.14.v20151106:start: com/sun/jersey/spi/inject/InjectableProvider" -->
+				<exclusion>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-hdfs</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<!-- Required for compatibility with Spark cluster (must use same version) -->
+		<dependency>
+			<groupId>org.apache.commons</groupId>
+			<artifactId>commons-lang3</artifactId>
+			<version>3.5</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>junit</groupId>
+			<artifactId>junit</artifactId>
+			<version>4.12</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-core</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-messaging</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.atlas.odf</groupId>
+			<artifactId>odf-store</artifactId>
+			<version>1.2.0-SNAPSHOT</version>
+			<scope>runtime</scope>
+		</dependency>
+	</dependencies>
+
+	<repositories>
+		<repository>
+			<id>iis-central</id>
+			<name>Archiva Managed Maven Repository</name>
+			<url>http://iis-repo.swg.usma.ibm.com:8080/archiva/repository/all/</url>
+		</repository>
+	</repositories>
+
+	<profiles>
+		<profile>
+			<id>atlas</id>
+			<dependencies>
+				<dependency>
+					<groupId>org.apache.atlas.odf</groupId>
+					<artifactId>odf-atlas</artifactId>
+					<version>1.2.0-SNAPSHOT</version>
+					<scope>runtime</scope>
+				</dependency>
+			</dependencies>
+		</profile>
+		<profile>
+			<id>jenkinsbuild</id>
+			<properties>
+				<cf.password>${env.CFPASSWORD}</cf.password> <!-- Take cf.password from environment variable when running in Jenkins so that the password doesn't appear in the log -->
+			</properties>
+		</profile>
+		<profile>
+			<id>integration-tests</id>
+			<activation>
+				<property>
+					<name>reduced-tests</name>
+					<value>!true</value>
+				</property>
+			</activation>
+			<build>
+				<plugins>
+					<plugin>
+						<groupId>org.apache.maven.plugins</groupId>
+						<artifactId>maven-failsafe-plugin</artifactId>
+						<version>2.19</version>
+						<configuration>
+							<systemPropertyVariables>
+								<!-- we always use the embedded Kafka in our integration tests -->
+								<odf.zookeeper.connect>${testZookeepeConnectionString}</odf.zookeeper.connect>
+								<odf.test.base.url>${odf.test.base.url}</odf.test.base.url>
+								<odf.test.webapp.url>${odf.test.webapp.url}</odf.test.webapp.url>
+								<odf.test.user>${odf.test.user}</odf.test.user>
+								<odf.test.password>${odf.test.password}</odf.test.password>
+								<odf.logspec>${odf.integrationtest.logspec}.client</odf.logspec>
+								<!-- The atlas configuration properties are only required when the "atlas" profile is activated -->
+								<atlas.url>${atlas.url}</atlas.url>
+								<atlas.user>${atlas.user}</atlas.user>
+								<atlas.password>${atlas.password}</atlas.password>
+							</systemPropertyVariables>
+							<includes>
+								<include>**/integrationtest/**</include>
+							</includes>
+						</configuration>
+						<executions>
+							<execution>
+								<id>integration-test</id>
+								<goals>
+									<goal>integration-test</goal>
+								</goals>
+							</execution>
+							<execution>
+								<id>verify</id>
+								<goals>
+									<goal>verify</goal>
+								</goals>
+							</execution>
+						</executions>
+					</plugin>
+				</plugins>
+			</build>
+		</profile>
+	</profiles>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.19</version>
+				<configuration>
+					<systemPropertyVariables>
+						<odf.logspec>${odf.unittest.logspec}</odf.logspec>
+						<odf.build.project.name>${project.name}</odf.build.project.name>
+					</systemPropertyVariables>
+					<excludes>
+						<exclude>**/integrationtest/**</exclude>
+					</excludes>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.eclipse.jetty</groupId>
+				<artifactId>jetty-maven-plugin</artifactId>
+				<version>9.2.14.v20151106</version>
+				<configuration>
+					<jettyXml>${project.parent.basedir}/jettyconfig/jetty.xml,${project.parent.basedir}/jettyconfig/jetty-ssl.xml,${project.parent.basedir}/jettyconfig/jetty-https.xml</jettyXml>
+					<scanIntervalSeconds>10</scanIntervalSeconds>
+					<stopPort>8005</stopPort>
+					<stopKey>STOP</stopKey>
+					<systemProperties>
+						<systemProperty>
+							<name>odf.zookeeper.connect</name>
+							<value>${testZookeepeConnectionString}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>odf.logspec</name>
+							<value>${odf.integrationtest.logspec}.jettyserver</value>
+						</systemProperty>
+						<systemProperty>
+							<name>jetty.config.dir</name>
+							<value>${project.parent.basedir}/target/jettyconfig</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.url</name>
+							<value>${atlas.url}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.user</name>
+							<value>${atlas.user}</value>
+						</systemProperty>
+						<systemProperty>
+							<name>atlas.password</name>
+							<value>${atlas.password}</value>
+						</systemProperty>
+					</systemProperties>
+				</configuration>
+				<executions>
+					<execution>
+						<id>start-jetty</id>
+						<phase>pre-integration-test</phase>
+						<goals>
+							<goal>start</goal>
+						</goals>
+						<configuration>
+							<scanIntervalSeconds>0</scanIntervalSeconds>
+							<daemon>true</daemon>
+						</configuration>
+					</execution>
+					<execution>
+						<id>stop-jetty</id>
+						<phase>post-integration-test</phase>
+						<goals>
+							<goal>stop</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>com.github.eirslett</groupId>
+				<artifactId>frontend-maven-plugin</artifactId>
+				<version>0.0.27</version>
+				<configuration>
+					<installDirectory>build</installDirectory>
+				</configuration>
+
+				<executions>
+					<execution>
+						<id>install node and npm</id>
+						<goals>
+							<goal>install-node-and-npm</goal>
+						</goals>
+						<configuration>
+							<nodeVersion>v8.1.2</nodeVersion>
+							<npmVersion>5.0.3</npmVersion>
+						</configuration>
+					</execution>
+					<execution>
+						<id>npm install</id>
+						<goals>
+							<goal>npm</goal>
+						</goals>
+						<configuration>
+							<arguments>install</arguments>
+						</configuration>
+					</execution>
+					<execution>
+						<id>webpack build</id>
+						<goals>
+							<goal>webpack</goal>
+						</goals>
+						<configuration>
+							<!-- change to -p for production mode -->
+							<arguments>-d</arguments>
+						</configuration>
+					</execution>
+					<!-- <execution> <id>npm-list-packages</id> <goals> <goal>npm</goal>
+						</goals> <phase>validate</phase> <configuration> <arguments>ls depth=0</arguments>
+						</configuration> </execution> -->
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-war-plugin</artifactId>
+				<version>2.4</version>
+				<configuration>
+					<failOnMissingWebXml>false</failOnMissingWebXml>
+					<packagingExcludes>**/scripts/**</packagingExcludes>
+					<overlays>
+						<overlay>
+							<!-- define here which files you want to take over from the odf-doc
+								war. -->
+							<groupId>org.apache.atlas.odf</groupId>
+							<artifactId>odf-doc</artifactId>
+							<excludes>
+								<exclude>WEB-INF/web.xml</exclude>
+							</excludes>
+							<includes>
+								<include>doc/**</include>
+							</includes>
+						</overlay>
+					</overlays>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<inherited>false</inherited>
+						<id>prepare-embedded-jetty</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<ant antfile="../prepare_embedded_jetty.xml" target="prepare-jetty-config" />
+							</target>
+						</configuration>
+					</execution>
+					<execution>
+						<id>prepare-components</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<target>
+								<property name="unpack-dir" value="${project.build.directory}/downloads" />
+								<property name="swagger.version" value="${swagger.version}" />
+								<ant antfile="download_swagger-ui.xml" target="default"></ant>
+							</target>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<artifactId>maven-resources-plugin</artifactId>
+				<version>2.6</version>
+				<executions>
+					<execution>
+						<id>copy-resources</id>
+						<phase>process-resources</phase>
+						<goals>
+							<goal>copy-resources</goal>
+						</goals>
+						<configuration>
+							<outputDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</outputDirectory>
+							<resources>
+								<resource>
+									<directory>${project.build.directory}/downloads/swagger-ui-${swagger.version}/dist</directory>
+									<filtering>false</filtering>
+									<excludes>
+										<exclude>index.html</exclude>
+									</excludes>
+								</resource>
+							</resources>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>com.github.kongchen</groupId>
+				<artifactId>swagger-maven-plugin</artifactId>
+				<version>3.1.1</version>
+				<configuration>
+					<apiSources>
+						<apiSource>
+							<springmvc>false</springmvc>
+							<locations>org.apache.atlas.odf.admin.rest.resources</locations>
+							<schemes>https</schemes>
+							<basePath>${swagger.base.path}</basePath>
+							<info>
+								<title>Open Discovery Framework</title>
+								<version>v1</version>
+								<description>
+									API reference
+								</description>
+							</info>
+							<swaggerDirectory>${project.build.directory}/${project.artifactId}-${project.version}/swagger</swaggerDirectory>
+							<swaggerApiReader>com.wordnik.swagger.jaxrs.reader.DefaultJaxrsApiReader</swaggerApiReader>
+						</apiSource>
+					</apiSources>
+				</configuration>
+				<executions>
+					<execution>
+						<phase>compile</phase>
+						<goals>
+							<goal>generate</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+</project>
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
new file mode 100755
index 0000000..89756cc
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/log/LoggingHandler.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.log;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.SimpleFormatter;
+
+public class LoggingHandler extends Handler {
+
+	private static final int LOG_CACHE_SIZE = 1000;
+	private static List<LogRecord> cachedLogs = Collections.synchronizedList(new ArrayList<LogRecord>());
+
+	@Override
+	public void publish(LogRecord record) {
+		cachedLogs.add(record);
+		if (cachedLogs.size() >= LOG_CACHE_SIZE) {
+			cachedLogs.remove(0);
+		}
+	}
+
+	@Override
+	public void flush() {
+		cachedLogs.clear();
+	}
+
+	@Override
+	public void close() throws SecurityException {
+		cachedLogs.clear();
+	}
+
+	public List<LogRecord> getCachedLog() {
+		return new ArrayList<LogRecord>(cachedLogs);
+	}
+
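+	/**
+	 * Formats the most recent numberOfLogs records (oldest first) whose level is
+	 * at or above logLevel. If numberOfLogs is null, all cached records are
+	 * formatted regardless of level.
+	 */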
+	public String getFormattedCachedLog(Integer numberOfLogs, Level logLevel) {
+		final List<LogRecord> cachedLog = getCachedLog();
+		StringBuilder lg = new StringBuilder();
+		final SimpleFormatter simpleFormatter = new SimpleFormatter();
+		if (numberOfLogs != null) {
+			// walk the last numberOfLogs records from oldest to newest
+			for (int no = numberOfLogs; no > 0; no--) {
+				if (no <= cachedLog.size()) {
+					final LogRecord record = cachedLog.get(cachedLog.size() - no);
+					if (record.getLevel().intValue() >= logLevel.intValue()) {
+						lg.append(simpleFormatter.format(record));
+					}
+				}
+			}
+		} else {
+			for (LogRecord record : cachedLog) {
+				lg.append(simpleFormatter.format(record));
+			}
+		}
+		return lg.toString();
+	}
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
new file mode 100755
index 0000000..b51da36
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/ODFAdminApp.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.core.Application;
+
+import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;
+import org.apache.atlas.odf.admin.rest.resources.AnalysesResource;
+import org.apache.atlas.odf.admin.rest.resources.AnnotationsResource;
+import org.apache.atlas.odf.admin.rest.resources.DiscoveryServicesResource;
+import org.apache.atlas.odf.admin.rest.resources.EngineResource;
+import org.apache.atlas.odf.admin.rest.resources.ImportResource;
+import org.apache.atlas.odf.admin.rest.resources.MetadataResource;
+import org.apache.atlas.odf.admin.rest.resources.SettingsResource;
+
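+/**
+ * JAX-RS application that registers the ODF admin REST resources and the
+ * Jackson JSON provider used for (de)serializing request and response bodies.
+ */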
+public class ODFAdminApp extends Application {
+	@Override
+	public Set<Class<?>> getClasses() {
+		Set<Class<?>> classes = new HashSet<Class<?>>();
+		classes.add(AnalysesResource.class);
+		classes.add(SettingsResource.class);
+		classes.add(EngineResource.class);
+		classes.add(MetadataResource.class);
+		classes.add(AnnotationsResource.class);
+		classes.add(DiscoveryServicesResource.class);
+		classes.add(ImportResource.class);
+		return classes;
+	}
+
+	@Override
+	public Set<Object> getSingletons() {
+		Set<Object> set = new HashSet<Object>();
+		set.add(new JacksonJsonProvider());
+		return set;
+	}
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
new file mode 100755
index 0000000..ed9010d
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/RestUtils.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
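+/**
+ * Helpers that turn exceptions or messages into a JSON error response of the
+ * form {"error": "<message>"}, returned with HTTP status 400 (Bad Request).
+ */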
+public class RestUtils {
+	public static Response createErrorResponse(Throwable t) {
+		StringWriter sw = new StringWriter();
+		PrintWriter pw = new PrintWriter(sw);
+		t.printStackTrace(pw);
+		return createErrorResponse(sw.toString());
+	}
+
+	public static Response createErrorResponse(String msg) {
+		Logger logger = Logger.getLogger(RestUtils.class.getName());
+		logger.log(Level.WARNING, "An unknown exception was thrown: ''{0}''", msg);
+		String errorMsg = "{ \"error\": \"An unknown exception occurred\"}";
+		try {
+			JSONObject errorJSON = new JSONObject();
+			errorJSON.put("error", msg);
+			errorMsg = errorJSON.write();
+		} catch (JSONException e) {
+			// do nothing, should never happen
+		}
+		return Response.status(Status.BAD_REQUEST).entity(errorMsg).build();
+	}
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java
new file mode 100755
index 0000000..a3bc3b2
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnalysesResource.java
@@ -0,0 +1,156 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.util.logging.Logger;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.atlas.odf.api.analysis.AnalysisCancelResult;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
+import org.apache.atlas.odf.api.ODFFactory;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
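+// Illustrative usage against the ODF test environment (host, port, and
+// credentials are taken from the test-env scripts and may differ per deployment):
+//   curl -k -u sdp:admin4sdp https://localhost:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/analyses/stats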
+@Path("/analyses")
+@Api(value = "/analyses", description = "Create and view analysis requests", produces = MediaType.APPLICATION_JSON)
+public class AnalysesResource {
+	private Logger logger = Logger.getLogger(AnalysesResource.class.getName());
+
+	@GET
+	@Path("/stats")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get analysis request statistics", httpMethod = "GET", notes = "Return number of successfull and failing analysis requests", response = AnalysisRequestSummary.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getStats() {
+		try {
+			return Response.ok(JSONUtils.toJSON(new ODFFactory().create().getAnalysisManager().getAnalysisStats())).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get list of analysis requests", httpMethod = "GET", notes = "Retrieve list of recent analysis requests (from latest to oldest)", responseContainer="List", response = AnalysisRequestTrackers.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getAnalysisRequests(
+			@ApiParam(value = "Starting offset (use 0 to start with the latest request).", required = false)
+			@DefaultValue("0") @QueryParam("offset") int offset,
+			@ApiParam(value = "Maximum number of analysis requests to be returned (use -1 to retrieve all requests).", required = false)
+			@DefaultValue("10") @QueryParam("limit") int limit) {
+		try {
+			String result = JSONUtils.toJSON(new ODFFactory().create().getAnalysisManager().getAnalysisRequests(offset, limit));
+			return Response.ok(result).build();
+		} catch (Exception exc) {
+			throw new RuntimeException(exc);
+		}
+	}
+
+	@GET
+	@Path("/{requestId}")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get analysis request status", httpMethod = "GET", notes = "Show status of a specific analysis request", response = AnalysisRequestStatus.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getAnalysisStatus(
+			@ApiParam(value = "ID of the analysis request", required = true)
+			@PathParam("requestId") String requestId) {
+		logger.entering(AnalysesResource.class.getName(), "getAnalysisStatus");
+		AnalysisRequestStatus analysisRequestStatus = new ODFFactory().create().getAnalysisManager().getAnalysisRequestStatus(requestId);
+		try {
+			return Response.ok(JSONUtils.toJSON(analysisRequestStatus)).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@POST
+	@Produces(MediaType.APPLICATION_JSON)
+	@Consumes(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Run analysis", httpMethod = "POST", notes = "Create and run new analysis request", response = AnalysisResponse.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response startAnalysis(@ApiParam(value = "Analysis request to be started", required = true) AnalysisRequest request) {
+		logger.entering(AnalysesResource.class.getName(), "startAnalysis");
+		try {
+			AnalysisResponse analysisResponse = new ODFFactory().create().getAnalysisManager().runAnalysis(request);
+			return Response.ok(JSONUtils.toJSON(analysisResponse)).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@POST
+	@Path("/{requestId}/cancel")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Cancel analysis request", httpMethod = "POST", notes = "Cancel a queued analysis request that has not been started yet", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request - The request with the provided id could not be found"),
+			@ApiResponse(code = 403, message = "Forbidden - The status of the analysis request does not allow for cancellation")
+	})
+	public Response cancelAnalysisRequest(@ApiParam(value = "ID of the analysis request", required = true) @PathParam("requestId") String requestId) {
+		logger.entering(AnalysesResource.class.getName(), "cancelAnalysisRequest");
+		AnalysisCancelResult result = new ODFFactory().create().getAnalysisManager().cancelAnalysisRequest(requestId);
+		if (result.getState() == AnalysisCancelResult.State.NOT_FOUND) {
+			return Response.status(Status.BAD_REQUEST).build();
+		} else if (result.getState() == AnalysisCancelResult.State.INVALID_STATE) {
+			return Response.status(Status.FORBIDDEN).build();
+		}
+		return Response.ok().build();
+	}
+
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java
new file mode 100755
index 0000000..704b004
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/AnnotationsResource.java
@@ -0,0 +1,130 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.annotation.AnnotationStoreUtils;
+import org.apache.atlas.odf.api.annotation.Annotations;
+import org.apache.atlas.odf.json.JSONUtils;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
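+// Illustrative usage (test-env credentials; deployment-specific):
+//   curl -k -u sdp:admin4sdp "https://localhost:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/annotations?analysisRequestId=<id>"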
+@Path("/annotations")
+@Api(value = "/annotations", description = "Create and query ODF annotations", produces = MediaType.APPLICATION_JSON)
+public class AnnotationsResource {
+
+	Logger logger = Logger.getLogger(AnnotationsResource.class.getName());
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Retrieve annotations", httpMethod = "GET", notes = "Retrieve annotations for an asset and/or for a specific analysis request.", response = Annotations.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response retrieveAnnotationsForAsset(@ApiParam(value = "Reference ID of the asset", required = false) @QueryParam("assetReference") String assetReference,
+			@ApiParam(value = "Analysis request ID", required = false) @QueryParam("analysisRequestId") String analysisRequestId) {
+		try {
+			MetaDataObjectReference ref = null;
+			if (assetReference != null) {
+				ref = new MetaDataObjectReference();
+				String repoId = new ODFFactory().create().getMetadataStore().getRepositoryId();
+				ref.setRepositoryId(repoId);
+				ref.setId(assetReference);
+			}
+			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+			List<Annotation> annots = as.getAnnotations(ref, analysisRequestId);
+			Annotations result = new Annotations();
+			result.setAnnotations(annots);
+			return Response.ok(JSONUtils.toJSON(result)).build();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while retrieving annotations", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
+
+
+	@GET
+	@Path("/objects/{objectReference}")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Retrieve annotation", httpMethod = "GET", notes = "Retrieve annotation by Id.", response = Annotation.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response retrieveAnnotation(@ApiParam(value = "Reference ID of the annotation", required = true) @PathParam("objectReference") String objectReference) {
+		try {
+			MetaDataObjectReference ref = new MetaDataObjectReference();
+			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+			ref.setRepositoryId(as.getRepositoryId());
+			ref.setId(objectReference);
+			Annotation annot = as.retrieveAnnotation(ref);
+			return Response.ok(JSONUtils.toJSON(annot)).build();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while retrieving annotation", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
+
+
+	// no swagger documentation as this will be replaced by "annotation propagation"
+	@GET
+	@Path("/newestAnnotations/{assetReference}")
+	@Produces(MediaType.APPLICATION_JSON)
+	public Response retrieveMostRecentAnnotations(@PathParam("assetReference") String assetReference) {
+		try {
+			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
+			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+			List<Annotation> annotations = AnnotationStoreUtils.getMostRecentAnnotationsByType(as, ref);
+			String result = JSONUtils.toJSON(annotations);
+			return Response.ok(result).build();
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "An exception occurred while retrieving most recent annotations", e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@POST
+	@ApiOperation(value = "Create annotation", httpMethod = "POST", notes = "Create new annotation object", response = MetaDataObjectReference.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response createAnnotation(@ApiParam(value = "Annotation to be created", required = true) String annotString) {
+		try {
+			Annotation annot = JSONUtils.fromJSON(annotString, Annotation.class);
+			AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+			MetaDataObjectReference annotRef = as.store(annot);
+			return Response.status(Status.CREATED).entity(JSONUtils.toJSON(annotRef)).build();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while storing an annotation", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
+
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java
new file mode 100755
index 0000000..bd01e60
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/DiscoveryServicesResource.java
@@ -0,0 +1,341 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.io.InputStream;
+import java.util.List;
+import java.util.logging.Logger;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceManager;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceRuntimeStatistics;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceStatus;
+import org.apache.atlas.odf.api.discoveryservice.ServiceNotFoundException;
+import org.apache.atlas.odf.api.discoveryservice.ServiceStatusCount;
+import org.apache.atlas.odf.json.JSONUtils;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
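+// Illustrative usage (test-env credentials; deployment-specific):
+//   curl -k -u sdp:admin4sdp https://localhost:58081/odf-web-1.2.0-SNAPSHOT/odf/api/v1/services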
+@Path("/services")
+@Api(value = "/services", description = "Manage ODF services", produces = MediaType.APPLICATION_JSON)
+public class DiscoveryServicesResource {
+	private Logger logger = Logger.getLogger(DiscoveryServicesResource.class.getName());
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get list of discovery services", httpMethod = "GET", notes = "Retrieve list of all discovery services registered in ODF", responseContainer="List", response = DiscoveryServiceProperties.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getDiscoveryServices() {
+		logger.entering(DiscoveryServicesResource.class.getName(), "getServices");
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response;
+		List<DiscoveryServiceProperties> dsProperties = dsAdmin.getDiscoveryServicesProperties();
+		try {
+			String json = JSONUtils.toJSON(dsProperties);
+			response = Response.ok(json).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			response = RestUtils.createErrorResponse(e);
+		}
+		return response;
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@Path("/status")
+	@ApiOperation(value = "Get status of discovery services", httpMethod = "GET", notes = "Retrieve status overview of all discovery services registered in ODF", responseContainer="List", response = ServiceStatusCount.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 404, message = "Not found"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getAllServicesStatus() {
+		logger.entering(DiscoveryServicesResource.class.getName(), "getAllServicesStatus");
+		List<ServiceStatusCount> servicesStatus = new ODFFactory().create().getDiscoveryServiceManager().getDiscoveryServiceStatusOverview();
+		if (servicesStatus == null) {
+			return Response.status(Status.NOT_FOUND).build();
+		}
+		String json;
+		try {
+			json = JSONUtils.toJSON(servicesStatus);
+		} catch (JSONException e) {
+			throw new RuntimeException(e);
+		}
+		return Response.ok(json).build();
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@Path("/{serviceId}/status")
+	@ApiOperation(value = "Get discovery service status", httpMethod = "GET", notes = "Retrieve status of a discovery service that is registered in ODF", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 404, message = "Not found"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getDiscoveryServiceStatus(
+			@ApiParam(value = "Discovery service ID", required = true)
+			@PathParam("serviceId") String serviceId) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceStatus");
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response;
+		try {
+			DiscoveryServiceStatus dsStatus = dsAdmin.getDiscoveryServiceStatus(serviceId);
+			if (dsStatus == null) {
+				response = Response.status(Status.NOT_FOUND).build();
+			}
+			else {
+				try {
+					String json = JSONUtils.toJSON(dsStatus);
+					response = Response.ok(json).build();
+				} catch (JSONException e) {
+					e.printStackTrace();
+					logger.info("Parse exception " + e);
+					response = RestUtils.createErrorResponse(e);
+				}
+			}
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		return response;
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@Path("/{serviceId}/runtimeStats")
+	@ApiOperation(value = "Get runtime statistics of a discovery service", httpMethod = "GET", notes = "Retrieve the runtime statistics of a discovery service that is registered in ODF.", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 404, message = "Not found"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getDiscoveryServiceRuntimeStats(
+			@ApiParam(value = "Discovery service ID", required = true)
+			@PathParam("serviceId") String serviceId) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceRuntimeStats");
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response;
+		try {
+			DiscoveryServiceRuntimeStatistics dsRuntimeStats = dsAdmin.getDiscoveryServiceRuntimeStatistics(serviceId);
+			String json = JSONUtils.toJSON(dsRuntimeStats);
+			response = Response.ok(json).build();
+		}
+		catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			response = RestUtils.createErrorResponse(e);
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		return response;
+	}
+
+	@DELETE
+	@Path("/{serviceId}/runtimeStats")
+	@ApiOperation(value = "Delete runtime statistics of a discovery service", httpMethod = "DELETE", notes = "Delete the runtime statistics of a discovery service that is registered in ODF.", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 404, message = "Not found"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response deleteDiscoveryServiceRuntimeStats(
+			@ApiParam(value = "Discovery service ID", required = true)
+			@PathParam("serviceId") String serviceId) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "deleteDiscoveryServiceRuntimeStats");
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response;
+		try {
+			dsAdmin.deleteDiscoveryServiceRuntimeStatistics(serviceId);
+			response = Response.ok().build();
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		return response;
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@Path("/{serviceId}")
+	@ApiOperation(value = "Get properties of a discovery service registered in ODF", httpMethod = "GET", notes = "Retrieve properties of a discovery service that is registered in ODF", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 404, message = "Not found"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getDiscoveryServiceProperties(
+			@ApiParam(value = "Id string of discovery service", required = true)
+			@PathParam("serviceId") String serviceId) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "getDiscoveryServiceProperties");
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response;
+		try {
+			DiscoveryServiceProperties dsProps = dsAdmin.getDiscoveryServiceProperties(serviceId);
+			if (dsProps == null) {
+				response = Response.status(Status.NOT_FOUND).build();
+			}
+			else {
+				try {
+					String json = JSONUtils.toJSON(dsProps);
+					response = Response.ok(json).build();
+				} catch (JSONException e) {
+					e.printStackTrace();
+					logger.info("Parse exception " + e);
+					response = RestUtils.createErrorResponse(e);
+				}
+			}
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		return response;
+	}
+
+	@POST
+	@Consumes(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Register a discovery service", httpMethod = "POST", notes = "Register a new service in ODF", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response registerDiscoveryService(
+			@ApiParam(value = "ODF service definition", required = true) DiscoveryServiceProperties dsProperties) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "registerDiscoveryService");
+		Response response;
+		try {
+			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+			dsAdmin.createDiscoveryService(dsProperties);
+			response = Response.ok().build();
+		} catch (ValidationException e) {
+			e.printStackTrace();
+			logger.info("Validation exception during setting of property " + e.getProperty());
+			response = RestUtils.createErrorResponse(e.getErrorCause());
+		}
+		return response;
+	}
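+	// Illustrative usage, not part of the original patch: a minimal registration
+	// payload posted to /odf/api/v1/services. Only the id and name fields are
+	// visible elsewhere in this patch; any further DiscoveryServiceProperties
+	// fields (e.g. endpoint definitions) would have to be supplied as required:
+	//   { "id": "my-service", "name": "My discovery service" }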
+
+	@PUT
+	@Consumes(MediaType.APPLICATION_JSON)
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Update properties of a discovery service", httpMethod = "POST", notes = "Update properties of a discovery service that is registered in ODF", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response updateDiscoveryService(
+			@ApiParam(value = "ODF service definition", required = true) DiscoveryServiceProperties dsProperties) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "updateDiscoveryService");
+		Response response;
+		try {
+			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+			dsAdmin.replaceDiscoveryService(dsProperties);
+			response = Response.ok().build();
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		catch (ValidationException e) {
+			e.printStackTrace();
+			logger.info("Validation exception during setting of property " + e.getProperty());
+			response = RestUtils.createErrorResponse(e.getErrorCause());
+		}
+		return response;
+	}
+
+	@DELETE
+	@Path("/{serviceId}")
+	@ApiOperation(value = "Delete a discovery service", httpMethod = "DELETE", notes = "Remove a registered service from ODF", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response deleteDiscoveryService(
+			@ApiParam(value = "Id string of discovery service to be deleted", required = true)
+			@PathParam("serviceId") String serviceId) {
+		logger.entering(DiscoveryServicesResource.class.getName(), "deleteDiscoveryService");
+		Response response;
+		try {
+			DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+			dsAdmin.deleteDiscoveryService(serviceId);
+			response = Response.ok().build();
+		}
+		catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		catch (ValidationException e) {
+			e.printStackTrace();
+			logger.info("Validation exception during deletion. Property: " + e.getProperty());
+			response = RestUtils.createErrorResponse(e.getErrorCause());
+		}
+		return response;
+	}
+
+	@GET
+	@Path("/{serviceId}/image")
+	@Produces("image/*")
+	@ApiOperation(value = "Get a discovery service logo", httpMethod = "GET", notes = "Retrieve image representing a discovery service", response = InputStream.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 404, message = "Not found"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getImage(
+			@ApiParam(value = "ID of discovery service", required = true)
+			@PathParam("serviceId") String serviceId) {
+
+		DiscoveryServiceManager dsAdmin = new ODFFactory().create().getDiscoveryServiceManager();
+		Response response = null;
+		InputStream is;
+		try {
+			is = dsAdmin.getDiscoveryServiceImage(serviceId);
+			if (is == null) {
+				// should never happen
+				response = Response.status(Status.NOT_FOUND).build();
+			}
+			else {
+				response = Response.ok(is, "image/png").build();
+			}
+		} catch (ServiceNotFoundException snfe) {
+			response = Response.status(Status.NOT_FOUND).entity(snfe.getMessage()).build();
+		}
+		return response;
+	}
+
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java
new file mode 100755
index 0000000..d6cd37d
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/EngineResource.java
@@ -0,0 +1,167 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.atlas.odf.api.engine.SystemHealth;
+import org.apache.atlas.odf.api.utils.ODFLogConfig;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.admin.log.LoggingHandler;
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.engine.ODFEngineOptions;
+import org.apache.atlas.odf.api.engine.ODFStatus;
+import org.apache.atlas.odf.api.engine.ODFVersion;
+import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
+import org.apache.atlas.odf.json.JSONUtils;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
+@Path("/engine")
+@Api(value = "/engine", description = "Monitor and control the ODF engine", produces = MediaType.APPLICATION_JSON)
+public class EngineResource {
+	final static LoggingHandler REST_LOG_HANDLER = new LoggingHandler();
+
+	static {
+		//initialize log config and log handler to cache logs
+		ODFLogConfig.run();
+		Logger rootLogger = Logger.getLogger("org.apache.atlas.odf");
+		REST_LOG_HANDLER.setLevel(Level.ALL);
+		rootLogger.addHandler(REST_LOG_HANDLER);
+	}
+
+	private Logger logger = Logger.getLogger(EngineResource.class.getName());
+
+	@POST
+	@Path("shutdown")
+	@Consumes(MediaType.APPLICATION_JSON)
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Shutdown ODF engine", httpMethod = "POST", notes = "Shutdown ODF engine, purge all scheduled analysis requests from the queues, and cancel all running analysis requests (for debugging purposes only)", response = Response.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response shutdown(@ApiParam(value = "Engine options", required = true) ODFEngineOptions engineOptions) {
+		logger.entering(EngineResource.class.getName(), "shutdown");
+		logger.log(Level.INFO, "Restart option is ", engineOptions.isRestart());
+		new ODFFactory().create().getEngineManager().shutdown(engineOptions);
+		return Response.ok().build();
+	}
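+	// Illustrative usage, not part of the original patch: the body is a JSON
+	// serialization of ODFEngineOptions. Based on the isRestart() accessor used
+	// above, a JavaBeans-style mapping would look like
+	//   POST /odf/api/v1/engine/shutdown with body { "restart": false }
+	// but the exact property name depends on how ODFEngineOptions is serialized.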
+
+	@GET
+	@Path("health")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get health status", httpMethod = "GET", notes = "Check the health status of ODF", response = SystemHealth.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response healthCheck() {
+		logger.entering(EngineResource.class.getName(), "healthCheck");
+		SystemHealth health = new ODFFactory().create().getEngineManager().checkHealthStatus();
+		Status status = Status.OK;
+		try {
+			return Response.status(status).entity(JSONUtils.toJSON(health)).type(MediaType.APPLICATION_JSON).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Path("runtimes")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get info about the available runtimes", httpMethod = "GET", notes = "Get information about all runtimes running discovery services", response = SystemHealth.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 400, message = "Bad Request"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getRuntimesInfo() {
+		logger.entering(EngineResource.class.getName(), "getRuntimesInfo");
+		ServiceRuntimesInfo sri = new ODFFactory().create().getEngineManager().getRuntimesInfo();
+		Status status = Status.OK;
+		try {
+			return Response.status(status).entity(JSONUtils.toJSON(sri)).type(MediaType.APPLICATION_JSON).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		} finally {
+			logger.exiting(EngineResource.class.getName(), "getRuntimesInfo");
+		}
+	}
+
+	@GET
+	@Path("status")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get current status", httpMethod = "GET", notes = "Retrieve status of the messaging subsystem and the internal thread manager", response = ODFStatus.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getStatus() throws IOException {
+		logger.entering(EngineResource.class.getName(), "getStatus");
+		try {
+			ODFStatus odfStatus = new ODFFactory().create().getEngineManager().getStatus();
+			return Response.status(Status.OK).entity(JSONUtils.toJSON(odfStatus)).type(MediaType.APPLICATION_JSON).build();
+		} catch (Exception exc) {
+			logger.log(Level.INFO, "An exception occurred while getting the request status", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
+
+	@GET
+	@Path("log")
+	@Produces(MediaType.TEXT_PLAIN)
+	@ApiOperation(value = "Get current application log", httpMethod = "GET", notes = "Retrieve logs of the ODF instance", response = ODFStatus.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getLog(@QueryParam("numberOfLogs") Integer numberOfLogs, @QueryParam("logLevel") String logLevel) throws IOException {
+		logger.entering(EngineResource.class.getName(), "getLog");
+		try {
+			Level level = Level.ALL;
+			if (logLevel != null) {
+				level = Level.parse(logLevel);
+			}
+			return Response.status(Status.OK).entity(REST_LOG_HANDLER.getFormattedCachedLog(numberOfLogs, level)).type(MediaType.TEXT_PLAIN).build();
+		} catch (Exception exc) {
+			logger.log(Level.INFO, "An exception occurred while getting the ODF log", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
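+	// Illustrative usage, not part of the original patch: both query parameters
+	// are optional, and logLevel must be a java.util.logging level name accepted
+	// by Level.parse, e.g.
+	//   GET /odf/api/v1/engine/log?numberOfLogs=100&logLevel=WARNING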
+
+	@GET
+	@Path("version")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get the ODF build version", httpMethod = "GET", notes = "The version is of the form versionnumber-buildid, e.g., 0.1.0-154", response = ODFVersion.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getVersion() {
+		try {
+			ODFVersion version = new ODFFactory().create().getEngineManager().getVersion();
+			Status status = Status.OK;
+			return Response.status(status).entity(JSONUtils.toJSON(version)).type(MediaType.APPLICATION_JSON).build();
+		} catch (Exception exc) {
+			logger.log(Level.INFO, "An exception occurred while getting the version", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+
+	}
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java
new file mode 100755
index 0000000..ef489a8
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/ImportResource.java
@@ -0,0 +1,89 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.util.logging.Logger;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImportResult;
+import org.apache.atlas.odf.api.metadata.importer.JDBCMetadataImporter;
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.importer.MetadataImportException;
+import org.apache.atlas.odf.api.metadata.models.JDBCConnection;
+import org.apache.atlas.odf.json.JSONUtils;
+
+@Path("/import")
+public class ImportResource {
+	private Logger logger = Logger.getLogger(ImportResource.class.getName());
+
+	@POST
+	@Consumes(MediaType.APPLICATION_JSON)
+	@Produces(MediaType.APPLICATION_JSON)
+	public Response doImport(String parameterString) {
+		logger.entering(ImportResource.class.getName(), "doImport");
+		try {
+			JSONObject parameter = new JSONObject(parameterString);
+
+			Object jdbcObj = parameter.get("jdbcString");
+			Object userObj = parameter.get("user");
+			Object passwordObj = parameter.get("password");
+			Object dbObj = parameter.get("database");
+			Object schemaObj = parameter.get("schema");
+			Object tableObj = parameter.get("table");
+
+			if (jdbcObj == null || userObj == null || passwordObj == null || dbObj == null || schemaObj == null || tableObj == null) {
+				return RestUtils.createErrorResponse("jdbcString, user, password, database, schema and table are required!");
+			}
+
+			String user = (String) userObj;
+			String password = (String) passwordObj;
+			String jdbcString = (String) jdbcObj;
+			String db = (String) dbObj;
+			String schema = (String) schemaObj;
+			String table = (String) tableObj;
+
+			JDBCMetadataImporter importer = new ODFFactory().create().getJDBCMetadataImporter();
+			JDBCConnection conn = new JDBCConnection();
+			conn.setJdbcConnectionString(jdbcString);
+			conn.setUser(user);
+			conn.setPassword(password);
+
+			JDBCMetadataImportResult result = null;
+			try {
+				result = importer.importTables(conn, db, schema, table);
+			} catch (MetadataImportException ex) {
+				return RestUtils.createErrorResponse(ex.getMessage());
+			}
+
+			if (result == null) {
+				return Response.serverError().build();
+			}
+
+			return Response.ok(JSONUtils.toJSON(result)).build();
+		} catch (JSONException e) {
+			return RestUtils.createErrorResponse(e.getMessage());
+		}
+	}
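+	// Illustrative usage, not part of the original patch: a request body carrying
+	// all fields read above, with placeholder values:
+	//   POST /odf/api/v1/import with body
+	//   { "jdbcString": "jdbc:<subprotocol>://<host>:<port>/<db>",
+	//     "user": "<user>", "password": "<password>",
+	//     "database": "<db>", "schema": "<schema>", "table": "<table>" }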
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java
new file mode 100755
index 0000000..9daf09a
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/MetadataResource.java
@@ -0,0 +1,246 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.wink.json4j.JSONException;
+import org.apache.wink.json4j.JSONObject;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.metadata.InternalMetaDataUtils;
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.models.MetaDataObject;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.json.JSONUtils;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
+@Path("/metadata")
+@Api(value = "/metadata", description = "Populate and query metadata repository", produces = MediaType.APPLICATION_JSON)
+public class MetadataResource {
+	private Logger logger = Logger.getLogger(MetadataResource.class.getName());
+
+	@GET
+	@Path("/connectiontest")
+	public Response testConnection() {
+		try {
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			MetadataStore.ConnectionStatus status = mds.testConnection();
+			switch (status) {
+			case OK:
+				return Response.ok().build();
+			case AUTHORIZATION_FAILED:
+				return Response.status(Status.UNAUTHORIZED).build();
+			case UNREACHABLE:
+				return Response.status(Status.NOT_FOUND).build();
+			default:
+				return Response.status(Status.INTERNAL_SERVER_ERROR).build();
+			}
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "An exception occurred while getting metatdata store properties", e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get metadata store properties", httpMethod = "GET", notes = "Retrieve type and URL of underlying metadata store", response = Response.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getMetadataStoreProperties() {
+		try {
+			JSONObject result = new JSONObject();
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			Hashtable<Object, Object> propertyHashtable = (Hashtable<Object, Object>) mds.getProperties();
+			for (Object propKey : propertyHashtable.keySet()) {
+				result.put((String) propKey, (String) propertyHashtable.get(propKey));
+			}
+			String s = result.write();
+			return Response.ok(s).build();
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "An exception occurred while getting metatdata store properties", e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Path("/referencetypes")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Get list of available reference types", httpMethod = "GET", notes = "Retrieve list of supported metadata object reference types", responseContainer="List", response = MetaDataObject.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response getReferenceTypes() {
+		JSONObject result = new JSONObject();
+		List<String> referenceTypes = null;
+		try {
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			referenceTypes = mds.getReferenceTypes();
+			result = JSONUtils.toJSONObject(referenceTypes);
+			return Response.ok(result.write()).build();
+		} catch (JSONException e) {
+			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + referenceTypes);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Path("/asset/{assetReference}")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Retrieve asset by reference", httpMethod = "GET", notes = "Retrieve object from metadata repository", response = MetaDataObject.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response retrieveAsset(@ApiParam(value = "Metadata object reference id", required = true) @PathParam("assetReference") String assetReference) {
+		JSONObject result;
+		try {
+			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			MetaDataObject mdo = mds.retrieve(ref);
+			if (mdo != null) {
+				result = JSONUtils.toJSONObject(mdo);
+			} else {
+				// Return empty JSON document to indicate that the result should be null.
+				result = new JSONObject();
+			}
+			return Response.ok(result.write()).build();
+		} catch (JSONException e) {
+			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + assetReference);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
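+	// Illustrative note, not part of the original patch: the {assetReference}
+	// path segment is not a plain id but a URL-encoded JSON serialization of a
+	// MetaDataObjectReference, e.g. (fields as used by the admin UI):
+	//   { "repositoryId": "<repository-id>", "id": "<object-id>" }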
+
+	@GET
+	@Path("/asset/{assetReference}/{referenceType}")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Retrieve objects referenced by an asset", httpMethod = "GET", notes = "Retrieve referenced metadata objects by reference type", responseContainer="List", response = MetaDataObject.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response retrieveAssetReferences(
+			@ApiParam(value = "Metadata object reference", required = true) @PathParam("assetReference") String assetReference,
+			@ApiParam(value = "Reference type name (including 'PARENT' and 'CHILDREN')", required = true) @PathParam("referenceType") String referenceType) {
+		try {
+			MetaDataObjectReference ref = JSONUtils.fromJSON(assetReference, MetaDataObjectReference.class);
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			List<MetaDataObject> referencedObjects = new ArrayList<MetaDataObject>();
+			if (InternalMetaDataUtils.ODF_PARENT_REFERENCE.equals(referenceType.toUpperCase())) {
+				MetaDataObject parent = mds.getParent(mds.retrieve(ref));
+				if (parent != null) {
+					referencedObjects.add(parent);
+				}
+			} else if (InternalMetaDataUtils.ODF_CHILDREN_REFERENCE.equals(referenceType.toUpperCase())) {
+				referencedObjects = mds.getChildren(mds.retrieve(ref));
+			} else {
+				referencedObjects = mds.getReferences(referenceType.toUpperCase(), mds.retrieve(ref));
+			}
+			List<JSONObject> jsons = new ArrayList<JSONObject>();
+			for (MetaDataObject obj : referencedObjects) {
+				jsons.add(JSONUtils.toJSONObject(obj));
+			}
+			String result = JSONUtils.toJSON(jsons);
+			logger.log(Level.FINE, "Serialized JSON: {0}", result);
+			return Response.ok(result).build();
+		} catch (JSONException e) {
+			logger.warning("Parse exception " + e.getMessage() + " Parsed object: " + assetReference);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Path("/sampledata")
+	@ApiOperation(value = "Create sample data", httpMethod = "GET", notes = "Populate metadata repository with ODF sample metadata", response = Response.class)
+	@ApiResponses(value = { @ApiResponse(code = 200, message = "OK"), @ApiResponse(code = 500, message = "Internal server error") })
+	public Response createSampleData() {
+		try {
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			mds.createSampleData();
+			return Response.ok().build();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while creating sample data", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
+
+	@POST
+	@Path("/resetalldata")
+	public Response resetAllData() {
+		try {
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			mds.resetAllData();
+			return Response.ok().build();
+		} catch (Exception e) {
+			logger.log(Level.WARNING, "An exception occurred while resetting metatdata store", e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@GET
+	@Path("/search")
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Query metadata repository", httpMethod = "GET", notes = "Search for objects in metadata repository", responseContainer="List", response = MetaDataObjectReference.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error") })
+	public Response search(@ApiParam(value = "Query to be sent to metadata repository (refer to Atlas query notation)", required = true) @QueryParam("query") String query,
+			@ApiParam(value = "Type of results to be returned, 'objects' vs. 'references'", required = false) @QueryParam("resulttype") String resultType) {
+		List<MetaDataObjectReference> queryResults;
+		try {
+			MetadataStore mds = new ODFFactory().create().getMetadataStore();
+			try {
+				queryResults = mds.search(query);
+			} catch(MetadataStoreException e) {
+				logger.log(Level.WARNING, MessageFormat.format("Error processing query ''{0}''.", query), e);
+				return Response.status(Status.BAD_REQUEST).build();
+			}
+			List<JSONObject> jsons = new ArrayList<JSONObject>();
+			if ((resultType != null) && resultType.equals("references")) {
+				for (MetaDataObjectReference ref : queryResults) {
+					jsons.add(JSONUtils.toJSONObject(ref));
+				}
+			} else {
+				// TODO very slow, retrieve results in bulk ?!?
+				//FIXME serialization of each object on its own is necessary because of a jackson issue (https://github.com/FasterXML/jackson-databind/issues/336)
+				//this should be replaced by a custom objectmapper initialization, issue #59 in gitlab
+				for (MetaDataObjectReference ref : queryResults) {
+					MetaDataObject retrievedMdo = mds.retrieve(ref);
+					jsons.add(JSONUtils.toJSONObject(retrievedMdo));
+				}
+			}
+			String result = JSONUtils.toJSON(jsons);
+			logger.log(Level.FINE, "Serialized JSON: {0}", result);
+			return Response.ok(result).build();
+		} catch (Exception exc) {
+			logger.log(Level.WARNING, "An exception occurred while searching the metadata repository", exc);
+			return RestUtils.createErrorResponse(exc);
+		}
+	}
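+	// Illustrative usage, not part of the original patch: an Atlas-style query
+	// that returns plain references instead of fully retrieved objects:
+	//   GET /odf/api/v1/metadata/search?query=<atlas-query>&resulttype=references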
+}
diff --git a/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java
new file mode 100755
index 0000000..e203774
--- /dev/null
+++ b/odf/odf-web/src/main/java/org/apache/atlas/odf/admin/rest/resources/SettingsResource.java
@@ -0,0 +1,128 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.admin.rest.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.ApiParam;
+import io.swagger.annotations.ApiResponse;
+import io.swagger.annotations.ApiResponses;
+
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.logging.Logger;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.atlas.odf.admin.rest.RestUtils;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.settings.SettingsManager;
+import org.apache.atlas.odf.api.settings.validation.ValidationException;
+import org.apache.atlas.odf.json.JSONUtils;
+import org.apache.wink.json4j.JSONException;
+
+import org.apache.atlas.odf.api.ODFFactory;
+
+@Path("/settings")
+@Api(value = "/settings", description = "View or update the settings of the Open Discovery Framework", produces = MediaType.APPLICATION_JSON)
+public class SettingsResource {
+
+	private Logger logger = Logger.getLogger(SettingsResource.class.getName());
+
+	@GET
+	@Produces(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Retrieve settings", httpMethod = "GET", notes = "Retrieve current ODF settings", response = ODFSettings.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response getSettings() {
+		logger.entering(SettingsResource.class.getName(), "getSettings");
+		try {
+			return Response.ok(JSONUtils.toJSON(new ODFFactory().create().getSettingsManager().getODFSettingsHidePasswords()), MediaType.APPLICATION_JSON).build();
+		} catch (JSONException e) {
+			e.printStackTrace();
+			logger.info("Parse exception " + e);
+			return RestUtils.createErrorResponse(e);
+		}
+	}
+
+	@POST
+	@Path("/reset")
+	@Produces(MediaType.APPLICATION_JSON)
+	@Consumes(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Reset settings", httpMethod = "POST", notes = "Reset ODF settings to the default", response = Response.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response resetSettings() {
+		logger.entering(SettingsResource.class.getName(), "resetSettings");
+		new ODFFactory().create().getSettingsManager().resetODFSettings();
+		return Response.ok().build();
+	}
+
+	@PUT
+	@Produces(MediaType.APPLICATION_JSON)
+	@Consumes(MediaType.APPLICATION_JSON)
+	@ApiOperation(value = "Update settings", httpMethod = "PUT", notes = "Update ODF settings", response = ODFSettings.class)
+	@ApiResponses(value = {
+			@ApiResponse(code = 200, message = "OK"),
+			@ApiResponse(code = 400, message = "Bad Request"),
+			@ApiResponse(code = 500, message = "Internal server error")
+	})
+	public Response changeSettings(@ApiParam(value = "ODF configuration options", required = true) ODFSettings odfConfig) {
+		logger.entering(SettingsResource.class.getName(), "changeSettings");
+		if (odfConfig == null) {
+			return Response.status(Status.BAD_REQUEST).entity("The body must be a valid settings JSON.").build();
+		}
+
+		try {
+			SettingsManager config = new ODFFactory().create().getSettingsManager();
+			config.updateODFSettings(odfConfig);
+			return Response.ok(JSONUtils.toJSON(config.getODFSettingsHidePasswords())).build();
+		} catch (ValidationException e) {
+			e.printStackTrace();
+			logger.info("Validation exception during setting of property " + e.getProperty());
+			return RestUtils.createErrorResponse(e);
+		} catch (JSONException e1) {
+			e1.printStackTrace();
+			return RestUtils.createErrorResponse(MessageFormat.format("The provided input is not valid JSON in form {0}", getEmptyODFConfig()));
+		}
+	}
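+	// Illustrative usage, not part of the original patch: a minimal settings
+	// update. userDefined is the only ODFSettings field visible in this patch
+	// (see getEmptyODFConfig below); other fields are not shown here:
+	//   PUT /odf/api/v1/settings with body { "userDefined": { "myKey": "myValue" } }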
+
+	private String getEmptyODFConfig() {
+		ODFSettings odf = new ODFSettings();
+		odf.setUserDefined(new HashMap<String, Object>());
+		String emptyJSON = "";
+		try {
+			emptyJSON = JSONUtils.toJSON(odf);
+		} catch (JSONException e2) {
+			e2.printStackTrace();
+		}
+		return emptyJSON;
+	}
+}
diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png
new file mode 100755
index 0000000..fabcc37
--- /dev/null
+++ b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/activity_32.png
Binary files differ
diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png
new file mode 100755
index 0000000..1f3744b
--- /dev/null
+++ b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/applications_32.png
Binary files differ
diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png
new file mode 100755
index 0000000..59a7ff8
--- /dev/null
+++ b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/bar-chart_32.png
Binary files differ
diff --git a/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png
new file mode 100755
index 0000000..4b9bcd3
--- /dev/null
+++ b/odf/odf-web/src/main/resources/org/apache/atlas/odf/images/world_32.png
Binary files differ
diff --git a/odf/odf-web/src/main/webapp/.gitignore b/odf/odf-web/src/main/webapp/.gitignore
new file mode 100755
index 0000000..4cc0506
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/.gitignore
@@ -0,0 +1,19 @@
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+/odf-web.js
+/odf-web.js.map
+/odf-client.js
+/odf-client.js.map
+resources
+resources/**
diff --git a/odf/odf-web/src/main/webapp/WEB-INF/web.xml b/odf/odf-web/src/main/webapp/WEB-INF/web.xml
new file mode 100755
index 0000000..9e16b0d
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<web-app id="WebApp_ID" version="3.0"
+	xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
+	<display-name>odf-admin</display-name>
+	<servlet>
+		<servlet-name>odf-admin-servlet</servlet-name>
+		<servlet-class>org.glassfish.jersey.servlet.ServletContainer</servlet-class>
+		<init-param>
+			<param-name>javax.ws.rs.Application</param-name>
+			<param-value>org.apache.atlas.odf.admin.rest.ODFAdminApp</param-value>
+		</init-param>
+		<load-on-startup>1</load-on-startup>
+		<enabled>true</enabled>
+		<async-supported>false</async-supported>
+	</servlet>
+
+	<servlet-mapping>
+		<servlet-name>odf-admin-servlet</servlet-name>
+		<url-pattern>/odf/api/v1/*</url-pattern>
+	</servlet-mapping>
+
+	<security-constraint>
+		<web-resource-collection>
+			<web-resource-name>Secure resources</web-resource-name>
+			<url-pattern>/*</url-pattern>
+		</web-resource-collection>
+		<auth-constraint>
+			<role-name>admin</role-name>
+			<role-name>user</role-name>
+			<role-name>moderator</role-name>
+		</auth-constraint>
+	</security-constraint>
+	<login-config>
+		<auth-method>BASIC</auth-method>
+		<realm-name>ODF Realm</realm-name>
+	</login-config>
+</web-app>
diff --git a/odf/odf-web/src/main/webapp/client_index.html b/odf/odf-web/src/main/webapp/client_index.html
new file mode 100755
index 0000000..ea85c87
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/client_index.html
@@ -0,0 +1,31 @@
+<!doctype html>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Data lake application</title>
+</head>
+<body>
+   <div id="odf-toplevel-div" class="container-fluid">
+     Loading...
+   </div>
+   <script type="text/javascript" src="odf-config.js"></script>
+   <script type="text/javascript" src="odf-client.js"></script>
+</body>
+</html>
diff --git a/odf/odf-web/src/main/webapp/img/lg_proc.gif b/odf/odf-web/src/main/webapp/img/lg_proc.gif
new file mode 100755
index 0000000..7dd40ef
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/img/lg_proc.gif
Binary files differ
diff --git a/odf/odf-web/src/main/webapp/index.html b/odf/odf-web/src/main/webapp/index.html
new file mode 100755
index 0000000..f224997
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/index.html
@@ -0,0 +1,31 @@
+<!doctype html>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Open Discovery Framework</title>
+</head>
+<body>
+   <div id="odf-toplevel-div" class="container-fluid">
+     Loading...
+   </div>
+   <script type="text/javascript" src="odf-config.js"></script>
+   <script type="text/javascript" src="odf-web.js"></script>
+</body>
+</html>
diff --git a/odf/odf-web/src/main/webapp/odf-config.js b/odf/odf-web/src/main/webapp/odf-config.js
new file mode 100755
index 0000000..6bb4a47
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/odf-config.js
@@ -0,0 +1,15 @@
+/**
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const API_PATH = "odf/api/v1/";
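+// Note (added for clarity): this path matches the odf-admin-servlet mapping
+// (/odf/api/v1/*) declared in WEB-INF/web.xml; since it has no leading slash,
+// it is resolved relative to the page URL.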
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js b/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js
new file mode 100755
index 0000000..67bb709
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-analysis-request.js
@@ -0,0 +1,473 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var bootstrap = require("bootstrap");
+
+var React = require("react");
+var ReactDOM = require("react-dom");
+var LinkedStateMixin = require('react-addons-linked-state-mixin');
+var ReactBootstrap = require("react-bootstrap");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var configurationStore = require("./odf-configuration-store.js");
+var metadataStore = require("./odf-utils.js").MetadataStore;
+var ODFGlobals = require("./odf-globals.js");
+
+var Button = ReactBootstrap.Button;
+var Row = ReactBootstrap.Row;
+var Col = ReactBootstrap.Col;
+var Table = ReactBootstrap.Table;
+var Modal = ReactBootstrap.Modal;
+var Input = ReactBootstrap.Input;
+var Alert = ReactBootstrap.Alert;
+var Panel = ReactBootstrap.Panel;
+var Label = ReactBootstrap.Label;
+var Image = ReactBootstrap.Image;
+
+var OdfAnalysisRequest = {
+	NewAnalysisRequestButton : React.createClass({
+
+		getInitialState : function(){
+			return {showAnalysisRequestDialog : false};
+		},
+
+		open : function(){
+			this.setState({showAnalysisRequestDialog: true});
+		},
+
+		onClose : function(){
+			this.setState({showAnalysisRequestDialog: false});
+			if(this.props.onClose){
+				this.props.onClose();
+			}
+		},
+
+		render : function() {
+			return (
+					<span>
+						<Button bsStyle={this.props.bsStyle} onClick={this.open}>Start analysis (service sequence)</Button>
+						<OdfAnalysisRequest.NewAnalysisRequestDialog show={this.state.showAnalysisRequestDialog} dataSetId={this.props.dataSetId} alertCallback={this.props.alertCallback} onClose={this.onClose}/>
+					</span>
+			);
+		}
+
+	}),
+
+	NewAnalysisRequestDialog : React.createClass({
+
+	  mixins : [AJAXCleanupMixin],
+
+	  getInitialState : function() {
+	    return ({config: null, discoveryServices: [], errorMessage: null, discoveryServiceSequence: []});
+	  },
+
+	  close : function() {
+		  this.clearDialogState();
+		  if(this.props.onClose){
+			  this.props.onClose();
+		  }
+	  },
+
+	  submitRequest : function() {
+		this.setState({requestInProgress : true});
+	    var dataSet = this.refs.inputDataSet.getValue();
+	    var discoveryServiceIDs = $.map(this.state.discoveryServiceSequence,
+	       function(dsreg) {
+	          return dsreg.id;
+	       }
+	    );
+
+	    var repositoryId = this.state.repositoryId;
+	    var metadataObjectRef = {
+	      repositoryId: repositoryId,
+	      id: dataSet
+	    };
+	    var analysisRequest = {
+	      dataSets: [metadataObjectRef],
+	      discoveryServiceSequence: discoveryServiceIDs
+	    };
+
+	    // now post request
+	    // clear alert
+	    if(this.props.alertCallback){
+	    	this.props.alertCallback({type: "", message: ""});
+	    }
+	    var req = $.ajax({
+	      url: ODFGlobals.analysisUrl,
+	      contentType: "application/json",
+	      dataType: 'json',
+	      type: 'POST',
+	      data: JSON.stringify(analysisRequest),
+	      success: function(analysisResponse) {
+	        if(!this.isMounted()){
+	        	return;
+	        }
+	    	if (analysisResponse.invalidRequest) {
+	          this.setState({errorMessage: analysisResponse.details, requestInProgress: false});
+	        } else {
+	          var msg = "Analysis request was started. ID: " + analysisResponse.id;
+	          if(this.props.alertCallback){
+	      	    this.props.alertCallback({type: "success", message: msg});
+	          }
+	      	  this.close();
+	        }
+	      }.bind(this),
+	      error: function(xhr, status, err) {
+	        var msg = "Error while reading ODF services: " + err.toString();
+	        this.setState({errorMessage: msg, requestInProgress: false});
+	      }.bind(this)
+	    });
+
+	    this.storeAbort(req.abort);
+	  },
+
+	  componentDidMount : function() {
+		  this.loadDiscoveryServices();
+	  },
+
+	  loadDiscoveryServices : function() {
+	    var req = configurationStore.readConfig(
+	      function(config) {
+	    	if(!this.isMounted()){
+	        	return;
+	        }
+	        this.setState({config: config});
+	        // clear alert
+	        if(this.props.alertCallback){
+	        	this.props.alertCallback({type: "", message: ""});
+	        }
+	        var req2 = $.ajax({
+	          url: ODFGlobals.servicesUrl,
+	          dataType: 'json',
+	          type: 'GET',
+	          success: function(data) {
+	        	if(!this.isMounted()){
+	  	        	return;
+	  	        }
+	            this.setState({discoveryServices: data});
+	          }.bind(this),
+	          error: function(xhr, status, err) {
+	            var msg = "Error while reading ODF services: " + err.toString();
+	            if(this.props.alertCallback){
+	        	    this.props.alertCallback({type: "danger", message: msg});
+	            }
+	         }.bind(this)
+	        });
+	        this.storeAbort(req2.abort);
+	      }.bind(this),
+	      this.props.alertCallback
+	    );
+	    // fetch the repository id used by submitRequest (same lookup as in
+	    // NewCreateAnnotationsDialog below); without it, state.repositoryId stays undefined
+	    metadataStore.getProperties(
+	      function(data) {
+	        this.setState({repositoryId: data.STORE_PROPERTY_ID});
+	      }.bind(this)
+	    );
+
+	    this.storeAbort(req.abort);
+	  },
+
+	  getDiscoveryServiceFromId : function(id) {
+	      var servicesWithSameId = this.state.discoveryServices.filter(
+	         function(dsreg) {
+	             return dsreg.id == id;
+	         }
+	      );
+	      if (servicesWithSameId.length > 0) {
+	        return servicesWithSameId[0];
+	      }
+	      return null;
+	  },
+
+	  processDiscoveryServiceSelection : function() {
+	      var selection = this.refs.inputAvailableDiscoveryServices.getValue();
+	      var dsreg = this.getDiscoveryServiceFromId(selection);
+	      if (dsreg) {
+	        var newSequence = this.state.discoveryServiceSequence.slice();
+	        newSequence.push(dsreg);
+	        this.setState({discoveryServiceSequence: newSequence});
+	      }
+	  },
+
+	  clearDialogState : function() {
+	      this.setState({discoveryServiceSequence: [], requestInProgress: false});
+	  },
+
+	  render : function() {
+	     var alert = null;
+	     if (this.state.errorMessage) {
+	        alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
+	     }
+	     var servicesOptions = $.map(
+	            this.state.discoveryServices,
+	            function(dsreg) {
+	              return (<option key={dsreg.id} value={dsreg.id}>{dsreg.name}</option>);
+	            }.bind(this)
+	        );
+
+	     var discoveryServiceSequenceComponents = $.map(this.state.discoveryServiceSequence,
+	         function(dsreg) {
+	            return <li key={dsreg.id}>{dsreg.name} ({dsreg.id})</li>
+	         }
+	     );
+
+	     var waitingContainer = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
+	     if(!this.state.requestInProgress){
+	    	 waitingContainer = null;
+	     }
+
+	     return (
+	       <Modal show={this.props.show} onHide={this.close}>
+	         <Modal.Header closeButton>
+	            <Modal.Title>Start analysis (specify service sequence)</Modal.Title>
+	         </Modal.Header>
+	         <Modal.Body>
+	         	{waitingContainer}
+	            {alert}
+	            <Input type="text" ref="inputDataSet" label="Data Set" value={this.props.dataSetId} readOnly={this.props.dataSetId}></Input>
+	            <hr/>
+	            Select a service from the "Available Services"
+	            dropdown to append it to the sequence. Repeat selection to run multiple services for the data set.
+	            <Input type="select" onChange={this.processDiscoveryServiceSelection} ref="inputAvailableDiscoveryServices" label="Available Services">
+	              <option key="emptySelection">&lt;Select a service...&gt;</option>
+	              {servicesOptions}
+	            </Input>
+	            <strong>Service Sequence</strong>
+	            <ol>{discoveryServiceSequenceComponents}</ol>
+	            <hr />
+	            <Button bsStyle="warning" onClick={this.clearDialogState}>Clear Sequence</Button>
+	        </Modal.Body>
+	        <Modal.Footer>
+	        <Button onClick={this.submitRequest} bsStyle="primary">Submit</Button>
+	        <Button onClick={this.close} >Cancel</Button>
+	        </Modal.Footer>
+	       </Modal>
+	     );
+	  }
+
+	}),
+
+	NewCreateAnnotationsButton : React.createClass({
+
+		getInitialState : function(){
+			return {showCreateAnnotationsDialog : false};
+		},
+
+		open : function(){
+			this.setState({showCreateAnnotationsDialog: true});
+		},
+
+		onClose : function(){
+			this.setState({showCreateAnnotationsDialog: false});
+			if(this.props.onClose){
+				this.props.onClose();
+			}
+		},
+
+		render : function() {
+			return (
+					<span>
+						<Button bsStyle={this.props.bsStyle} onClick={this.open}>Start analysis (annotation types)</Button>
+						<OdfAnalysisRequest.NewCreateAnnotationsDialog show={this.state.showCreateAnnotationsDialog} dataSetId={this.props.dataSetId} alertCallback={this.props.alertCallback} onClose={this.onClose}/>
+					</span>
+			);
+		}
+
+	}),
+
+	NewCreateAnnotationsDialog : React.createClass({
+
+		  mixins : [AJAXCleanupMixin],
+
+		  getInitialState : function() {
+		    return ({config: null, annotationTypes: [], errorMessage: null, analysisTypeSelection: []});
+		  },
+
+		  close : function() {
+			  this.clearDialogState();
+			  if(this.props.onClose){
+				  this.props.onClose();
+			  }
+		  },
+
+		  submitRequest : function() {
+			this.setState({requestInProgress : true});
+		    var dataSet = this.refs.inputDataSet.getValue();
+		    var annotationTypeIDs = $.map(this.state.analysisTypeSelection,
+		       function(annotationTypeId) {
+		          return annotationTypeId;
+		       }
+		    );
+
+		    var repositoryId = this.state.repositoryId;
+		    var metadataObjectRef = {
+		      repositoryId: repositoryId,
+		      id: dataSet
+		    };
+		    var analysisRequest = {
+		      dataSets: [metadataObjectRef],
+		      annotationTypes: annotationTypeIDs
+		    };
+
+		    // now post request
+		    // clear alert
+		    if(this.props.alertCallback){
+		    	this.props.alertCallback({type: "", message: ""});
+		    }
+		    var req = $.ajax({
+		      url: ODFGlobals.analysisUrl,
+		      contentType: "application/json",
+		      dataType: 'json',
+		      type: 'POST',
+		      data: JSON.stringify(analysisRequest),
+		      success: function(analysisResponse) {
+		        if(!this.isMounted()){
+		        	return;
+		        }
+		    	if (analysisResponse.invalidRequest) {
+		          this.setState({errorMessage: analysisResponse.details, requestInProgress: false});
+		        } else {
+		          var msg = "Analysis request was started. ID: " + analysisResponse.id;
+		          if(this.props.alertCallback){
+		      	    this.props.alertCallback({type: "success", message: msg});
+		          }
+		      	  this.close();
+		        }
+		      }.bind(this),
+		      error: function(xhr, status, err) {
+		        var msg = "Error starting discovery request: " + err.toString();
+		        this.setState({errorMessage: msg, requestInProgress: false});
+		      }.bind(this)
+		    });
+
+		    this.storeAbort(req.abort);
+		  },
+
+		  componentDidMount : function() {
+			  this.loadAnnotationTypes();
+		  },
+
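+		  // Loads the ODF config, collects the annotation types produced by all registered services, and fetches the metadata repository id.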
+		  loadAnnotationTypes : function() {
+		    var req = configurationStore.readConfig(
+		      function(config) {
+		    	if(!this.isMounted()){
+		        	return;
+		        }
+		        this.setState({config: config});
+		        // clear alert
+		        if(this.props.alertCallback){
+		        	this.props.alertCallback({type: "", message: ""});
+		        }
+		        var req2 = $.ajax({
+		          url: ODFGlobals.servicesUrl,
+		          dataType: 'json',
+		          type: 'GET',
+		          success: function(data) {
+		        	if(!this.isMounted()){
+		  	        	return;
+		  	        }
+		            var ids = [];
+		            $.each(data, function(key, dsreg){
+			            $.each(dsreg.resultingAnnotationTypes, function(key, annotationTypeId){
+			            	if($.inArray(annotationTypeId, ids) == -1){
+				            	ids.push(annotationTypeId);
+			            	}
+			            });
+		            });
+		            this.setState({annotationTypes: ids});
+		          }.bind(this),
+		          error: function(xhr, status, err) {
+		            var msg = "Error while reading ODF services: " + err.toString();
+		            if(this.props.alertCallback){
+		        	    this.props.alertCallback({type: "danger", message: msg});
+		            }
+		         }.bind(this)
+		        });
+		        this.storeAbort(req2.abort);
+		      }.bind(this),
+		      this.props.alertCallback
+		    );
+			 metadataStore.getProperties(
+					 function(data) {
+					     this.setState({repositoryId: data.STORE_PROPERTY_ID});
+					 }.bind(this)
+			 );
+		    this.storeAbort(req.abort);
+		  },
+
+		  processAnalysisTypeSelection : function() {
+		      var selection = this.refs.inputAvailableAnnotationTypes.getValue();
+		      if (selection) {
+		        var newSelection = this.state.analysisTypeSelection.slice();
+		        newSelection.push(selection);
+		        this.setState({analysisTypeSelection: newSelection});
+		      }
+		  },
+
+		  clearDialogState : function() {
+		      this.setState({analysisTypeSelection: [], requestInProgress: false});
+		  },
+
+		  render : function() {
+		     var alert = null;
+		     if (this.state.errorMessage) {
+		        alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
+		     }
+		     var analysisTypeOptions = $.map(
+			            this.state.annotationTypes,
+			            function(annotationTypeId) {
+			              return (<option key={annotationTypeId} value={annotationTypeId}>{annotationTypeId}</option>);
+			            }.bind(this)
+			        );
+
+		     var analysisTypeSelectionComponents = $.map(this.state.analysisTypeSelection,
+		         function(annotationTypeId) {
+		            return <li key={annotationTypeId}>{annotationTypeId}</li>;
+		         }
+		     );
+
+		     var waitingContainer = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
+		     if(!this.state.requestInProgress){
+		    	 waitingContainer = null;
+		     }
+
+		     return (
+		       <Modal show={this.props.show} onHide={this.close}>
+		         <Modal.Header closeButton>
+		            <Modal.Title>Start analysis (specify annotation types)</Modal.Title>
+		         </Modal.Header>
+		         <Modal.Body>
+		         	{waitingContainer}
+		            {alert}
+		            <Input type="text" ref="inputDataSet" label="Data Set" value={this.props.dataSetId} readOnly={!!this.props.dataSetId}></Input>
+		            <hr/>
+		            Select an annotation type from the "Available Annotation Types"
+		            dropdown to append it to the list. Repeat the selection to request multiple annotation types for the data set.
+		            <Input type="select" onChange={this.processAnalysisTypeSelection} ref="inputAvailableAnnotationTypes" label="Available Annotation Types">
+		              <option key="emptySelection">&lt;Select an annotation type...&gt;</option>
+		              {analysisTypeOptions}
+		            </Input>
+		            <strong>Selected Annotation Types</strong>
+		            <ol>{analysisTypeSelectionComponents}</ol>
+		            <hr />
+		            <Button bsStyle="warning" onClick={this.clearDialogState}>Clear Selection</Button>
+		        </Modal.Body>
+		        <Modal.Footer>
+		        <Button onClick={this.submitRequest} bsStyle="primary">Submit</Button>
+		        <Button onClick={this.close} >Cancel</Button>
+		        </Modal.Footer>
+		       </Modal>
+		     );
+		  }
+
+		})
+}
+
+
+module.exports = OdfAnalysisRequest;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-client.js b/odf/odf-web/src/main/webapp/scripts/odf-client.js
new file mode 100755
index 0000000..de64367
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-client.js
@@ -0,0 +1,1087 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+require("bootstrap/dist/css/bootstrap.min.css");
+
+var $ = require("jquery");
+var bootstrap = require("bootstrap");
+
+var React = require("react");
+var ReactDOM = require("react-dom");
+var LinkedStateMixin = require('react-addons-linked-state-mixin');
+var ReactBootstrap = require("react-bootstrap");
+
+var Nav = ReactBootstrap.Nav;
+var NavItem = ReactBootstrap.NavItem;
+var Navbar = ReactBootstrap.Navbar;
+var NavDropdown = ReactBootstrap.NavDropdown;
+var Button = ReactBootstrap.Button;
+var Grid = ReactBootstrap.Grid;
+var Row = ReactBootstrap.Row;
+var Col = ReactBootstrap.Col;
+var Table = ReactBootstrap.Table;
+var Modal = ReactBootstrap.Modal;
+var Alert = ReactBootstrap.Alert;
+var Panel = ReactBootstrap.Panel;
+var Label = ReactBootstrap.Label;
+var Input = ReactBootstrap.Input;
+var Jumbotron = ReactBootstrap.Jumbotron;
+var Image = ReactBootstrap.Image;
+var Dropdown = ReactBootstrap.Dropdown;
+var DropdownButton = ReactBootstrap.DropdownButton;
+var CustomMenu = ReactBootstrap.CustomMenu;
+var MenuItem = ReactBootstrap.MenuItem;
+var Tooltip = ReactBootstrap.Tooltip;
+var OverlayTrigger = ReactBootstrap.OverlayTrigger;
+var Glyphicon = ReactBootstrap.Glyphicon;
+
+var ODFGlobals = require("./odf-globals.js");
+var OdfAnalysisRequest = require("./odf-analysis-request.js");
+var NewAnalysisRequestButton = OdfAnalysisRequest.NewCreateAnnotationsButton;
+var ODFBrowser = require("./odf-metadata-browser.js");
+var Utils = require("./odf-utils.js");
+var AtlasHelper = Utils.AtlasHelper;
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var UISpec = require("./odf-ui-spec.js");
+
+
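+// Maps known annotation types to the JSON properties rendered as labels in the UI; "Default" is the fallback for unknown types.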
+var knownAnnotations = {
+	"Default": [{value : "annotationType", style : "primary", label: "Unknown"}],
+	"ColumnAnalysisColumnAnnotation" : [{value: "jsonProperties.inferredDataClass.className", style: "danger" , label: "Class name"}, {value: "jsonProperties.inferredDataType.type", style: "info", label :"Datatype"}],
+	"DataQualityColumnAnnotation": [{style: "warning", value: "jsonProperties.qualityScore" , label: "Data quality score"}],
+    "MatcherAnnotation": [{style: "success", value: "jsonProperties.termAssignments", label: "Matching terms"}]
+};
+
+////////////////////////////////////////////////////////////////
+// toplevel navigation bar
+
+const constants_ODFNavBar = {
+  odfDataLakePage: "navKeyDataLakePage",
+  odfTermPage: "navKeyTermPage"
+}
+
+var ODFNavBar = React.createClass({
+   render: function() {
+       return (
+         <Navbar inverse>
+           <Navbar.Header>
+             <Navbar.Brand>
+               <b>Shop for Data Application, powered by Open Discovery Framework</b>
+             </Navbar.Brand>
+             <Navbar.Toggle />
+           </Navbar.Header>
+           <Navbar.Collapse>
+             <Nav pullRight activeKey={this.props.activeKey} onSelect={this.props.selectCallback}>
+               <NavItem eventKey={constants_ODFNavBar.odfDataLakePage} href="#">Data Lake Browser</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.odfTermPage} href="#">Glossary</NavItem>
+             </Nav>
+           </Navbar.Collapse>
+         </Navbar>
+       );
+   }
+});
+
+var ODFAnnotationLegend = React.createClass({
+
+	render : function(){
+		var items = [];
+		$.each(knownAnnotations, function(key, val){
+			$.each(val, function(key2, item){
+				items.push(<Label key={key + "_" + key2} bsStyle={item.style}>{item.label}</Label>);
+			});
+		});
+
+		return <div>{items}</div>;
+	}
+
+});
+
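+// Renders the labels configured in knownAnnotations for a single annotation, each wrapped in a tooltip.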
+var ODFAnnotationMarker = React.createClass({
+
+	render : function(){
+		var annotationKey = "Default";
+		var annotationLabels = [];
+		if(this.props.annotation && knownAnnotations[this.props.annotation.annotationType]){
+			annotationKey = this.props.annotation.annotationType;
+			var tooltip = <Tooltip id={this.props.annotation.annotationType}>{this.props.annotation.annotationType}<br/>{this.props.annotation.summary}</Tooltip>
+			$.each(knownAnnotations[annotationKey], function(key, val){
+				var style = val.style;
+				var value = ODFGlobals.getPathValue(this.props.annotation, val.value);
+				if (annotationKey === "MatcherAnnotation") {
+					value = value[0].matchingString; // if no abbreviation matches this will be the term; ideally it should be based on the OMBusinessTerm reference
+				}
+				else if(value && !isNaN(value)){
+					value = Math.round(value*100) + " %";
+				}
+				annotationLabels.push(<OverlayTrigger key={key} placement="top" overlay={tooltip}><Label style={{margin: "5px"}} bsStyle={style}>{value}</Label></OverlayTrigger>);
+			}.bind(this));
+		}else if(this.props.annotation){
+			// unknown annotation type: fall back to the "Default" style and show the raw type name
+			var tooltip = <Tooltip id={this.props.annotation.annotationType}>{this.props.annotation.annotationType}<br/>{this.props.annotation.summary}</Tooltip>;
+			annotationLabels.push(<OverlayTrigger key="unknownAnnotation" placement="top" overlay={tooltip}><Label style={{margin: "5px"}} bsStyle={knownAnnotations[annotationKey][0].style}>{this.props.annotation.annotationType}</Label></OverlayTrigger>);
+		}
+
+		return <div style={this.props.style}>{annotationLabels}</div>;
+	}
+});
+
+
+var AnnotationsColumn = React.createClass({
+	mixins : [AJAXCleanupMixin],
+
+	getInitialState : function(){
+		return {annotations: []};
+	},
+
+	componentDidMount : function() {
+		if(this.props.annotations){
+			this.setState({loadedAnnotations : this.props.annotations});
+			return;
+		}
+
+		if(this.props.annotationReferences){
+			this.loadColumnAnnotations(this.props.annotationReferences);
+		}
+	},
+
+	componentWillReceiveProps : function(nextProps){
+		if(!this.isMounted()){
+			return;
+		}
+
+		if(nextProps.annotations){
+			this.setState({loadedAnnotations : nextProps.annotations});
+			return;
+		}
+	},
+
+	render : function(){
+		if(this.state){
+			var annotations = this.state.loadedAnnotations;
+			if(!annotations || (annotations.length > 0 && annotations[0].repositoryId)){
+				return <noscript/>;
+			}
+
+			var processedTypes = [];
+			var colAnnotations = [];
+			$.each(annotations, function(key, val){
+				if(processedTypes.indexOf(val.annotationType) == -1){
+					processedTypes.push(val.annotationType);
+					var style = {float: "left"};
+					if(key % 6 == 0){
+						style = {clear: "both"};
+					}
+
+					colAnnotations.push(<ODFAnnotationMarker style={style} key={key} annotation={val}/>);
+				}
+			});
+
+			return <div>{colAnnotations}</div>;
+		}
+		return <noscript/>;
+	}
+
+});
+
+var QualityScoreFilter = React.createClass({
+
+	getInitialState : function(){
+		return {key: "All", val : "0", showMenu : false};
+	},
+
+	onSelect : function(obj, key){
+
+		if(obj.target.tagName != "INPUT"){
+			this.setState({key: key});
+			var equation = "All";
+			if(key != "All"){
+				if(this.refs.numberInput.getValue().trim() == ""){
+					return;
+				}
+				equation = key + this.refs.numberInput.getValue();
+			}
+			this.props.onFilter(equation);
+		}
+	},
+
+	textChange : function(event){
+		var equation = "All";
+		if(this.state.key != "All"){
+			if(this.refs.numberInput.getValue().trim() == ""){
+				return;
+			}
+			equation = this.state.key + this.refs.numberInput.getValue();
+		}
+		this.props.onFilter(equation);
+	},
+
+	render : function(){
+		var items = [];
+		var values = ["<", "<=", "==", ">=", ">", "!=", "All"];
+		$.each(values, function(key, val){
+			items.push(<MenuItem onSelect={this.onSelect} id={val} key={key} eventKey={val}>{val}</MenuItem>)
+		}.bind(this));
+
+		var menu = <div bsRole="menu" className={"dropdown-menu"}>
+			<h5 style={{float: "left", marginLeft: "15px"}}><Label ref="typeLabel">{this.state.key}</Label></h5>
+			<Input style={{width: "100px"}} ref="numberInput" onChange={this.textChange} type="number" defaultValue="1"/>
+			{items}
+		</div>;
+
+		return <div style={this.props.style}  >
+			<Dropdown id="quality score select" onSelect={this.onSelect} open={this.state.showMenu} onToggle={function(){}}>
+				<Button bsRole="toggle" onClick={function(){this.setState({showMenu: !this.state.showMenu})}.bind(this)}>Qualityscore filter</Button>
+				{menu}
+			</Dropdown>
+		</div>;
+	}
+});
+
+var DataClassFilter = React.createClass({
+
+	defaultClasses : ["US Zip", "Credit Card"],
+
+	render : function(){
+		var items = [];
+		var classes = (this.props.dataClasses ? this.props.dataClasses.slice() : this.defaultClasses);
+		classes.push("All");
+		$.each(classes, function(key, val){
+			items.push(<MenuItem id={val} key={key} eventKey={val}>{val}</MenuItem>)
+		});
+
+		return <div style={this.props.style}>
+			<DropdownButton id="Data class filter" onSelect={function(obj, key){this.props.onFilter(key)}.bind(this)} title="Data Class filter">
+				{items}
+			</DropdownButton>
+		</div>;
+	}
+});
+
+
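+// Dropdown combining the quality score and data class filters; both filter values are passed together to the onFilter callback.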
+var FilterMenu = React.createClass({
+
+	getInitialState : function(){
+		return {showMenu : false, dataClassFilter: "All", qualityScoreFilter: "All"};
+	},
+
+	onQualityScoreFilter: function(param){
+		this.setState({qualityScoreFilter: param});
+		if(this.props.onFilter){
+			this.props.onFilter({dataClassFilter: this.state.dataClassFilter, qualityScoreFilter: param});
+		}
+	},
+
+	onDataClassFilter : function(param){
+		this.setState({dataClassFilter: param});
+		if(this.props.onFilter){
+			this.props.onFilter({dataClassFilter: param, qualityScoreFilter: this.state.qualityScoreFilter});
+		}
+	},
+
+	render : function(){
+		var menu = <div bsRole="menu" className={"dropdown-menu"}>
+			<QualityScoreFilter onFilter={this.onQualityScoreFilter}/>
+			<br />
+			<DataClassFilter dataClasses={this.props.dataClasses} onFilter={this.onDataClassFilter}  />
+		</div>;
+
+		return <div style={this.props.style}  >
+			<Dropdown id="filter menu" open={this.state.showMenu} onToggle={function(){}}>
+				<Button bsRole="toggle" onClick={function(){this.setState({showMenu: !this.state.showMenu})}.bind(this)}>Filter annotations</Button>
+				{menu}
+			</Dropdown>
+		</div>;
+	}
+
+});
+
+
+var SelectCheckbox = React.createClass({
+
+	getInitialState : function(){
+		return {selected : this.props.asset.isSelected};
+	},
+
+	componentWillReceiveProps : function(nextProps){
+		if(!this.isMounted()){
+			return;
+		}
+		if(nextProps.asset.reference.id != this.props.asset.reference.id){
+			this.setState({selected : nextProps.asset.isSelected});
+		}
+	},
+
+	onChange : function(selected){
+		if(this.props.onChange){
+			this.props.onChange(selected);
+		}
+		this.setState({selected : selected});
+	},
+
+	render : function(){
+		return <div><Input style={{marginTop: "-6px"}} type="checkbox" label=" " checked={this.state.selected} onChange={function(e){
+			this.onChange($(e.target).prop("checked"));
+		}.bind(this)}/></div>;
+	}
+
+});
+
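+// Main data lake page: lists relational data sets, shows the columns and annotations of a selected table, and supports filtering and JDBC imports.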
+var ODFDataLakePage = React.createClass({
+
+	columnAnnotations : {},
+
+	getInitialState : function(){
+		return {
+			ajaxAborts : [],
+			sourceLoading: false,
+			columns: [],
+			dataClasses: [],
+			qualityScoreFilter: "All",
+			dataClassFilter: "All",
+			importFeedback: {msg: null, style: "primary"}
+		};
+	},
+
+	componentDidMount : function() {
+		this.loadSources();
+	},
+
+	loadSources : function(){
+		this.searchAtlasMetadata("from RelationalDataSet", function(data){
+			 $.each(data, function(key, source){
+				 source.isSelected = false;
+			  });
+			this.setState({filteredSources: data, sources: data});
+		}.bind(this));
+	},
+
+	searchAtlasMetadata : function(query, successCallback, errorCallback) {
+		var url = ODFGlobals.metadataUrl + "/search?" + $.param({query: query});
+		$.ajax({
+			url: url,
+			dataType: 'json',
+			type: 'GET',
+			success: function(data) {
+				successCallback(data);
+			},
+			error: function(xhr, status, err) {
+				console.error(url, status, err.toString());
+				var msg = "Error while loading metadata: " + err.toString();
+				if(errorCallback){
+					errorCallback(msg);
+				}
+			}
+		});
+	 },
+
+	load : function(assetRef){
+		$.each(this.state.ajaxAborts, function(key, abort){
+			if(abort && abort.call){
+				abort.call();
+			}
+		});
+		this.setState({ajaxAborts : []});
+
+		var req = AtlasHelper.loadAtlasAsset(assetRef, function(data){
+			var source = data;
+			var refresh = false;
+			if(this.state == null || this.state.selectedTable == null || this.state.selectedTable.reference.id != source.reference.id){
+				console.log("set state source " + new Date().toLocaleString());
+				this.setState({selectedTable: source});
+				if(source.annotations == null){
+					source.annotations = [];
+				}
+				if(source.columns == null){
+					source.columns = [];
+				}
+			}else{
+				source.annotations = this.state.selectedTable.annotations;
+				refresh = true;
+			}
+
+			this.loadSourceAnnotations(source, refresh);
+			this.loadColumns(source, refresh);
+		}.bind(this), function(){
+
+		});
+	},
+
+	loadSourceAnnotations : function(source, refresh){
+		if(!refresh || !source.loadedAnnotations){
+			source.loadedAnnotations = [];
+		}
+        var reqs = AtlasHelper.loadMostRecentAnnotations(source.reference, function(annotationList){
+            if (refresh) {
+            	var newAnnotations = [];
+            	if(source.loadedAnnotations.length > 0){
+            		$.each(annotationList, function(key, val){
+            			if(!this.atlasAssetArrayContains(source.loadedAnnotations, val)){
+            				newAnnotations.push(val);
+            			}
+            		}.bind(this));
+            	}else{
+            		newAnnotations = annotationList;
+            	}
+                source.loadedAnnotations = newAnnotations;
+            }else{
+            	source.loadedAnnotations = annotationList;
+            }
+            console.log("set state source anns " + new Date().toLocaleString());
+            this.setState({selectedTable: source});
+        }.bind(this), function(){
+
+        });
+
+        var ajaxAborts = [];
+		$.each(reqs, function(key, req){
+			ajaxAborts.push(req.abort);
+		}.bind(this))
+		this.setState({ajaxAborts : ajaxAborts});
+	},
+
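+	// true if the array contains an asset with the same metadata reference id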
+	atlasAssetArrayContains : function(array, obj){
+		for(var no = 0; no < array.length; no++){
+			var val = array[no];
+			if(val && val.reference && obj && obj.reference && val.reference.id == obj.reference.id){
+				return true;
+			}
+		}
+		return false;
+	},
+
+	loadColumns : function(dataSet, refresh){
+		var columns = [];
+		if(refresh){
+			columns = this.state.columns;
+		}
+		var reqs = AtlasHelper.loadRelationalDataSet(dataSet, function(result){
+			var foundAnnotations = false;
+			if(!refresh){
+				$.each(result, function(key, col){
+					if(col.annotations && col.annotations.length > 0){
+						foundAnnotations = true;
+					}
+					if(col.isSelected == null || col.isSelected == undefined){
+						col.isSelected = false;
+					}
+					columns.push(col);
+				});
+			}else{
+				//if result size is different, reset completely
+				if(result.length != columns.length){
+					columns = [];
+				}
+				//if the old array contains any column that is not in the new columns, reset completely
+				$.each(columns, function(key, col){
+					if(!this.atlasAssetArrayContains(result, col)){
+						columns = [];
+					}
+				}.bind(this));
+				$.each(result, function(key, col){
+					//only add new columns
+					if(!this.atlasAssetArrayContains(columns, col)){
+						columns.push(col);
+					}
+					if(col.annotations && col.annotations.length > 0){
+						for(var no = 0; no < columns.length; no++){
+							if(columns[no] == null || columns[no] == undefined){
+								col.isSelected = false;
+							}
+							if(columns[no].reference.id == col.reference.id){
+								columns[no].annotations = col.annotations;
+								break;
+							}
+						}
+						foundAnnotations = true;
+					}
+				}.bind(this));
+			}
+
+			if(!foundAnnotations){
+				if(!Utils.arraysEqual(this.state.columns, columns)){
+					console.log("set state columns " + new Date().toLocaleString());
+					this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns});
+				}else{
+					console.log("columns same, no annotations, dont update");
+				}
+			}else{
+				this.loadColumnAnnotations(columns, refresh);
+			}
+		}.bind(this), function(){
+
+		});
+
+        var ajaxAborts = [];
+		$.each(reqs, function(key, req){
+			ajaxAborts.push(req.abort);
+		}.bind(this))
+		this.setState({ajaxAborts : ajaxAborts});
+	},
+
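+	// Loads the most recent annotations for each column and collects the inferred data classes for the filter menu.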
+	loadColumnAnnotations : function(columns, refresh){
+		var annotationRefs = [];
+		$.each(columns, function(key, col){
+			if(!refresh || !col.loadedAnnotations){
+				col.loadedAnnotations = [];
+			}
+		});
+
+		var requests = [];
+		var annotationsChanged = false;
+		var dataClasses = [];
+		$.each(columns, function(key, column){
+			var req = AtlasHelper.loadMostRecentAnnotations(column.reference, function(annotations){
+				$.each(annotations, function(key, annotation){
+					if(!this.atlasAssetArrayContains(column.loadedAnnotations, annotation)){
+						annotationsChanged = true;
+						column.loadedAnnotations.push(annotation);
+					}
+					if(annotation &&
+							annotation.inferredDataClass && dataClasses.indexOf(annotation.inferredDataClass.className) == -1){
+						dataClasses.push(annotation.inferredDataClass.className);
+					}
+				}.bind(this));
+			}.bind(this));
+			requests.push(req);
+		}.bind(this));
+
+		$.when.apply(undefined, requests).done(function(){
+			if(annotationsChanged){
+				console.log("set state column anns " + new Date().toLocaleString());
+				this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns, dataClasses: dataClasses});
+			}else{
+				if(!Utils.arraysEqual(this.state.columns, columns)){
+					console.log("set state column anns " + new Date().toLocaleString());
+					this.setState({currentlyLoading : false, columns: columns, filteredColumns: columns});
+				}else{
+					console.log("columns same, annotations same, dont update");
+				}
+			}
+		}.bind(this));
+
+        var ajaxAborts = [];
+		$.each(requests, function(key, req){
+			ajaxAborts.push(req.abort);
+		}.bind(this));
+		this.setState({ajaxAborts : ajaxAborts});
+	},
+
+	storeColumnAnnotation : function(columnId, annotation){
+		if(!this.columnAnnotations[columnId]){
+			this.columnAnnotations[columnId] = [];
+		}
+		if(!this.atlasAssetArrayContains(this.columnAnnotations[columnId], annotation)){
+			this.columnAnnotations[columnId].push(annotation);
+		}
+	},
+
+	componentWillUnmount : function() {
+		if(this.refreshInterval){
+			clearInterval(this.refreshInterval);
+		}
+   	},
+
+	referenceClick : function(asset){
+		if(this.state == null || this.state.selectedTable == null || this.state.selectedTable.reference.id != asset.reference.id){
+			if(this.refreshInterval){
+				clearInterval(this.refreshInterval);
+			}
+			this.setState({currentlyLoading : true, selectedTable: null, filteredColumns : [], columns: []});
+			this.load(asset.reference);
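+			// poll the selected asset every 15 seconds to pick up annotations created by running analyses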
+			this.refreshInterval = setInterval(function(){this.load(asset.reference)}.bind(this), 15000);
+		}
+	},
+
+	doFilter : function(params){
+		var columns = this.state.columns.slice();
+		var filteredColumns = this.filterOnDataQualityScore(columns, params.qualityScoreFilter);
+		filteredColumns = this.filterOnDataClass(filteredColumns, params.dataClassFilter);
+		this.setState({filteredColumns: filteredColumns});
+	},
+
+	filterOnDataQualityScore : function(columns, equation){
+		if(equation.indexOf("All")>-1){
+			return columns;
+		}
+
+		var matchedColumns = [];
+		$.each(columns, function(index, col){
+			$.each(col.loadedAnnotations, function(k, annotation){
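+				// equation is "<operator><number>" (e.g. ">=0.5") built from a fixed operator list,
+				// so the eval below only ever sees "annotation.qualityScore<operator><number>"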
+				if(equation && annotation.qualityScore){
+					if(eval("annotation.qualityScore" + equation)){
+						if(matchedColumns.indexOf(col) == -1){
+							matchedColumns.push(col);
+						}
+					}
+				}
+			}.bind(this));
+		}.bind(this));
+
+		return matchedColumns;
+	},
+
+	filterOnDataClass : function(columns, key){
+		if(key == "All"){
+			return columns;
+		}
+		var matchedColumns = [];
+		$.each(columns, function(index, col){
+			$.each(col.loadedAnnotations, function(k, annotation){
+				if(annotation.inferredDataClass &&
+						annotation.inferredDataClass.className == key){
+					if(matchedColumns.indexOf(col) == -1){
+						matchedColumns.push(col);
+					}
+				}
+			});
+		});
+
+		return matchedColumns;
+	},
+
+	doImport : function(){
+		var params = {
+				jdbcString : this.refs.jdbcInput.getValue(),
+				user: this.refs.userInput.getValue(),
+				password : this.refs.passInput.getValue(),
+				database :this.refs.dbInput.getValue(),
+				schema : this.refs.schemaInput.getValue(),
+				table : this.refs.sourceInput.getValue()
+		};
+
+		this.setState({importingTable: true, tableWasImported: true});
+
+		$.ajax({
+		      url: ODFGlobals.importUrl,
+		      contentType: "application/json",
+		      dataType: 'json',
+		      type: 'POST',
+		      data: JSON.stringify(params),
+		      success: function(data) {
+		    	  this.setState({importFeedback: {msg: "Registration successful!", style: "primary"}, importingTable: false});
+		      }.bind(this),
+		      error: function(xhr, status, err) {
+		        if(this.isMounted()){
+		          var errorMsg = status;
+		          if(xhr.responseJSON && xhr.responseJSON.error){
+		            errorMsg = xhr.responseJSON.error;
+		          }
+		          var msg = "Table could not be registered: " + errorMsg + ", " + err.toString();
+		          this.setState({importFeedback: {msg: msg, style: "warning"}, importingTable: false});
+		        }
+		      }.bind(this)
+		    });
+	},
+
+	closeImportingDialog : function(){
+		if(this.state.importingTable){
+			return;
+		}
+
+		var newState = {tableWasImported: false, showImportDialog : false, importFeedback: {msg: null}};
+		if(this.state.tableWasImported){
+			this.loadSources();
+			newState.sources = null;
+			newState.filteredSources = null;
+		}
+		this.setState(newState);
+	},
+
+	shopData : function(){
+		var selectedColumns = [];
+		var selectedSources = [];
+
+		$.each(this.state.columns, function(key, col){
+			if(col.isSelected){
+				selectedColumns.push(col);
+			}
+		});
+
+		$.each(this.state.sources, function(key, src){
+			if(src.isSelected){
+				selectedSources.push(src);
+			}
+		});
+
+		console.log("Do something with the selected columns!")
+		console.log(selectedColumns);
+		console.log(selectedSources);
+	},
+
+	filterSources : function(e){
+		var value = $(e.target).val();
+		var filtered = [];
+		if(value.trim() == ""){
+			filtered = this.state.sources;
+		}else{
+			$.each(this.state.sources, function(key, source){
+				if(source.name.toUpperCase().indexOf(value.toUpperCase()) > -1){
+					filtered.push(source);
+				}
+			});
+		}
+		this.setState({filteredSources : filtered});
+	},
+
+	storeImportDialogDefaults: function() {
+		var defaultValues = {
+		   "jdbcInput": this.refs.jdbcInput.getValue(),
+		   "userInput": this.refs.userInput.getValue(),
+		   "passInput": this.refs.passInput.getValue(),
+		   "dbInput": this.refs.dbInput.getValue(),
+		   "schemaInput": this.refs.schemaInput.getValue(),
+		   "sourceInput": this.refs.sourceInput.getValue(),
+		};
+		localStorage.setItem("odf-client-defaults", JSON.stringify(defaultValues));
+	},
+
+	render : function(){
+		var columnRows = [];
+		var sourceHead = null;
+		var sourceList = null;
+		var columnsGridHeader = <thead><tr><th>Column</th><th>Datatype</th><th>Annotations</th></tr></thead>;
+		var currentlyLoadingImg = null;
+		if(this.state){
+			var sourceListContent = null;
+			if(this.state.sources){
+				var sourceSpec =  {
+
+						attributes: [
+						       {key: "isSelected", label: "",
+								func: function(val, asset){
+									return <SelectCheckbox onChange={function(selected){
+										asset.isSelected = selected;
+									}.bind(this)} asset={asset} />
+
+								}},
+								{key: "icon", label: "", func:
+						    	   function(val, asset){
+							    	   if(asset && asset.type && UISpec[asset.type] && UISpec[asset.type].icon){
+							    		   return UISpec[asset.type].icon;
+							    	   }
+							    	   return UISpec["DefaultDocument"].icon;
+						       		}
+						       },
+							   {key: "name", label: "Name"},
+			                   {key: "type", label: "Type"},
+			                   {key: "annotations", label: "Annotations",
+					        	  func: function(val){
+					        		  if(!val){
+					        			  return 0;
+					        			  }
+					        		  return val.length;
+					        		}
+			                   }
+			            ]};
+
+				sourceListContent = <ODFBrowser.ODFPagingTable rowAssets={this.state.filteredSources} onRowClick={this.referenceClick} spec={sourceSpec}/>;
+			}else{
+				sourceListContent = <Image src="img/lg_proc.gif" rounded />;
+			}
+
+			var sourceImportBtn = <Button style={{float:"right"}} onClick={function(){this.setState({showImportDialog: true});}.bind(this)}>Register new data set</Button>;
+			var sourceImportingImg = null;
+			if(this.state.importingTable){
+				sourceImportingImg = <Image src="img/lg_proc.gif" rounded />;
+			}
+
+			var importFeedback = <h3><Label style={{whiteSpace: "normal"}} bsStyle={this.state.importFeedback.style}>{this.state.importFeedback.msg}</Label></h3>
+
+			var storedDefaults = null;
+			try {
+			   storedDefaults = JSON.parse(localStorage.getItem("odf-client-defaults"));
+			} catch(e) {
+				console.log("Couldnt parse defaults from localStorage: " + e);
+				storedDefaults = {};
+			}
+			if (!storedDefaults) {
+				storedDefaults = {};
+			}
+			console.log("Stored defaults: " + storedDefaults);
+
+			var sourceImportDialog =  <Modal show={this.state.showImportDialog} onHide={this.closeImportingDialog}>
+								          <Modal.Header closeButton>
+								             <Modal.Title>Register new JDBC data set</Modal.Title>
+								          </Modal.Header>
+								          <Modal.Body>
+								          	{importFeedback}
+								            <form>
+								          	 <Input type="text" ref="jdbcInput" defaultValue={storedDefaults.jdbcInput} label="JDBC string" />
+								             <Input type="text" ref="userInput" defaultValue={storedDefaults.userInput} label="Username" />
+								             <Input type="password" ref="passInput" defaultValue={storedDefaults.passInput} label="Password" />
+								             <Input type="text" ref="dbInput" defaultValue={storedDefaults.dbInput} label="Database" />
+								             <Input type="text" ref="schemaInput" defaultValue={storedDefaults.schemaInput} label="Schema" />
+								             <Input type="text" ref="sourceInput" defaultValue={storedDefaults.sourceInput} label="Table" />
+								             </form>
+								             {sourceImportingImg}
+								         </Modal.Body>
+								         <Modal.Footer>
+								         <Button onClick={this.storeImportDialogDefaults}>Store values as defaults</Button>
+								         <Button bsStyle="primary" onClick={this.doImport}>Register</Button>
+								         <Button onClick={this.closeImportingDialog}>Close</Button>
+								         </Modal.Footer>
+									</Modal>;
+			sourceList = <Panel style={{float:"left", marginRight: 30, maxWidth:600, minHeight: 550}}>
+								{sourceImportDialog}
+								<h3 style={{float: "left", marginTop: "5px"}}>
+									Data sets
+								</h3>
+								{sourceImportBtn}<br style={{clear: "both"}}/>
+								<Input onChange={this.filterSources} addonBefore={<Glyphicon glyph="search" />} label=" " type="text" placeholder="Filter ..." />
+								<br/>
+								{sourceListContent}
+							</Panel>;
+			if(this.state.currentlyLoading){
+				currentlyLoadingImg = <Image src="img/lg_proc.gif" rounded />;
+			}
+			var panel = <div style={{float: "left"}}>{currentlyLoadingImg}</div>;
+
+			if(this.state.selectedTable){
+				var source = this.state.selectedTable;
+				var sourceAnnotations = [];
+				if(source.loadedAnnotations){
+					//reverse so newest is at front
+					var sourceAnns = source.loadedAnnotations.slice();
+					sourceAnns.reverse();
+					var processedTypes = [];
+					$.each(sourceAnns, function(key, val){
+						if(processedTypes.indexOf(val.annotationType) == -1){
+							processedTypes.push(val.annotationType);
+							sourceAnnotations.push(<ODFAnnotationMarker key={key} annotation={val}/>);
+						}
+					});
+				}
+
+				var hasColumns = (source.columns && source.columns.length > 0 ? true : false);
+				var columnsString = (hasColumns ? "Columns: " + source.columns.length : null);
+				var annotationsFilter = (hasColumns ? <FilterMenu onFilter={this.doFilter} dataClasses={this.state.dataClasses} style={{float: "right"}} /> : null);
+
+				sourceHead = <div>
+								<h3>{source.name} </h3>
+									<div style={{}}>
+										<NewAnalysisRequestButton dataSetId={this.state.selectedTable.reference.id} />
+									</div>
+								<br/>
+								Description: {source.description}
+								<br/>
+								{columnsString}
+								<br/>Annotations:{sourceAnnotations}
+								<br/>
+								{annotationsFilter}
+								</div>;
+
+				panel = <Panel style={{float: "left", width: "50%"}} header={sourceHead}>
+							{currentlyLoadingImg}
+						</Panel>;
+			}
+			var columnsTable = null;
+			var filteredColumns = (this.state.filteredColumns ? this.state.filteredColumns : []).slice();
+
+			if(filteredColumns.length > 0){
+				var colSpec = {attributes: [{key: "isSelected", label: "Select",
+					func: function(val, col){
+						return <SelectCheckbox onChange={function(selected){
+							col.isSelected = selected;
+						}.bind(this)} asset={col} />
+
+					}},
+	               {key: "name", label: "Name", sort: true},
+		           {key: "dataType", label: "Datatype"},
+		           {key: "loadedAnnotations", label: "Annotations",
+			        	  func: function(annotations, obj){
+			        		  return <AnnotationsColumn annotations={annotations} />;
+			        	  }
+			          }]};
+				columnsTable = <div><ODFBrowser.ODFPagingTable ref="columnsTable" rowAssets={filteredColumns} assetType={"columns"} spec={colSpec}/><br/><ODFAnnotationLegend /></div>;
+				panel = (<Panel style={{float:"left", width: "50%"}} header={sourceHead}>
+							{columnsTable}
+						</Panel>);
+			}
+		}
+
+		var contentComponent = <Jumbotron>
+	      <div>
+	         <h2>Welcome to your Data Lake</h2>
+	         	<Button bsStyle="success" onClick={this.shopData}>
+	         		Shop selected data  <Glyphicon glyph="shopping-cart" />
+         		</Button>
+	         	<br/>
+	         	<br/>
+		         {sourceList}
+		         {panel}
+		        <div style={{clear: "both"}} />
+         </div>
+       </Jumbotron>;
+
+		return <div>{contentComponent}</div>;
+	}
+});
+
+var ODFTermPage = React.createClass({
+
+  getInitialState() {
+    return {terms: []};
+  },
+
+  loadTerms : function() {
+    // clear alert
+    this.props.alertCallback({type: "", message: ""});
+    var req = AtlasHelper.searchAtlasMetadata("from BusinessTerm",
+
+        function(data){
+		   	if(!this.isMounted()){
+				return;
+			}
+			this.setState({terms: data});
+        }.bind(this),
+
+        function() {
+        }.bind(this)
+    );
+  },
+
+  componentDidMount() {
+    this.loadTerms();
+  },
+
+  render: function() {
+     var terms = $.map(
+        this.state.terms,
+        function(term) {
+          return <tr style={{cursor: 'pointer'}} key={term.name} title={term.example} onClick={function(){
+        	  var win = window.open(term.originRef, '_blank');
+        	  if(win){ win.focus(); }}
+          }>
+                  <td>
+                     {term.name}
+                  </td>
+                  <td>
+                	{term.description}
+                  </td>
+                 </tr>
+        }.bind(this)
+       );
+
+     return (
+       <div className="jumbotron">
+       <h2>Glossary</h2>
+       <br/>
+       <br/>
+       <Panel>
+       	  <h3>Terms</h3>
+          <Table>
+          	 <thead>
+          	 	<tr>
+          	 	<th>Name</th>
+          	 	<th>Description</th>
+          	 	</tr>
+          	 </thead>
+             <tbody>
+                {terms}
+             </tbody>
+          </Table>
+          </Panel>
+       </div>
+     )
+   }
+});
+
+var ODFClient = React.createClass({
+
+   componentDidMount: function() {
+     $(window).bind("hashchange", this.parseUrl);
+     this.parseUrl();
+   },
+
+   parseUrl : function(){
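+    // the hash has the form "#<navBarKey>[/<addition>...]"; additions are passed on to the active page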
+    var target = constants_ODFNavBar.odfDataLakePage;
+    var navAddition = null;
+    var hash = document.location.hash;
+    if(hash && hash.length > 1){
+      hash = hash.split("#")[1];
+      var split = hash.split("/");
+      var navHash = split[0];
+      if(split.length > 1){
+        navAddition = split.slice(1);
+      }
+      if(constants_ODFNavBar[navHash]){
+        target = constants_ODFNavBar[navHash];
+      }
+    }
+    this.setState({
+      activeNavBarItem: target,
+        navAddition: navAddition}
+    );
+  },
+
+  getInitialState: function() {
+    return ({
+        activeNavBarItem: constants_ODFNavBar.odfDataLakePage,
+        navAddition: null,
+        globalAlert: {
+          type: "",
+          message: ""
+        }
+    });
+  },
+
+  handleNavBarSelection: function(selection) {
+    $.each(constants_ODFNavBar, function(key, ref){
+      if(ref == selection){
+        document.location.hash = key;
+      }
+    });
+    this.setState({ activeNavBarItem: selection });
+  },
+
+  handleAlert: function(alertInfo) {
+    this.setState({ globalAlert: alertInfo });
+  },
+
+  render: function() {
+    var alertComp = null;
+    if (this.state.globalAlert.type != "") {
+       alertComp = <Alert bsStyle={this.state.globalAlert.type}>{this.state.globalAlert.message}</Alert>;
+    }
+
+    var contentComponent = <ODFDataLakePage alertCallback={this.handleAlert}/>;
+    if (this.state.activeNavBarItem == constants_ODFNavBar.odfDataLakePage) {
+       contentComponent = <ODFDataLakePage alertCallback={this.handleAlert}/>;
+    } else if (this.state.activeNavBarItem == constants_ODFNavBar.odfTermPage) {
+       contentComponent = <ODFTermPage alertCallback={this.handleAlert}/>;
+    }
+
+    var divStyle = {
+//      marginLeft: "80px",
+//      marginRight: "80px"
+    };
+
+    return (
+        <div>
+           <ODFNavBar activeKey={this.state.activeNavBarItem} selectCallback={this.handleNavBarSelection}></ODFNavBar>
+           <div style={divStyle}>
+              {alertComp}
+              {contentComponent}
+           </div>
+        </div>
+    );
+  }
+});
+
+var div = $("#odf-toplevel-div")[0];
+ReactDOM.render(<ODFClient/>, div);
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js b/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js
new file mode 100755
index 0000000..cf50075
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-configuration-store.js
@@ -0,0 +1,63 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var ODFGlobals = require("./odf-globals.js");
+
+var ConfigurationStore = {
+
+   // Reads the ODF settings; returns the request's abort function so callers can cancel it.
+   readConfig(successCallback, alertCallback) {
+     // clear alert
+     if (alertCallback) {
+       alertCallback({type: ""});
+     }
+
+     return $.ajax({
+       url: ODFGlobals.apiPrefix + "settings",
+       dataType: 'json',
+       type: 'GET',
+       success: successCallback,
+       error: function(xhr, status, err) {
+         if (alertCallback) {
+            var msg = "Error while reading user defined properties: " + err.toString();
+            alertCallback({type: "danger", message: msg});
+         }
+       }
+      }).abort;
+   },
+
+   updateConfig(config, successCallback, alertCallback) {
+		if (alertCallback) {
+			 alertCallback({type: ""});
+		}
+
+	    return $.ajax({
+		       url: ODFGlobals.apiPrefix + "settings",
+		       contentType: "application/json",
+		       dataType: 'json',
+		       type: 'PUT',
+		       data: JSON.stringify(config),
+		       success: successCallback,
+		       error: function(xhr, status, err) {
+		         if (alertCallback) {
+		            var msg = "Error while reading user defined properties: " + err.toString();
+		            alertCallback({type: "danger", message: msg});
+		         }
+		       }
+	     }).abort;
+   }
+}
+
+module.exports = ConfigurationStore;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-console.js b/odf/odf-web/src/main/webapp/scripts/odf-console.js
new file mode 100755
index 0000000..aa70808
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-console.js
@@ -0,0 +1,967 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//css imports
+require("bootstrap/dist/css/bootstrap.min.css");
+require("bootstrap-material-design/dist/css/bootstrap-material-design.min.css");
+require("bootstrap-material-design/dist/css/ripples.min.css");
+require("roboto-font/css/fonts.css");
+
+
+//js imports
+var $ = require("jquery");
+var bootstrap = require("bootstrap");
+
+var React = require("react");
+var ReactDOM = require("react-dom");
+var LinkedStateMixin = require("react-addons-linked-state-mixin");
+var ReactBootstrap = require("react-bootstrap");
+
+var ODFGlobals = require("./odf-globals.js");
+var ODFStats = require("./odf-statistics.js");
+var ODFSettings = require("./odf-settings.js");
+var ODFServices = require("./odf-services.js");
+var ODFBrowser = require("./odf-metadata-browser.js").ODFMetadataBrowser;
+var ODFRequestBrowser = require("./odf-request-browser.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var configurationStore = require("./odf-utils.js").ConfigurationStore;
+var servicesStore = require("./odf-utils.js").ServicesStore;
+var AtlasHelper = require("./odf-utils.js").AtlasHelper;
+var AnnotationStoreHelper = require("./odf-utils.js").AnnotationStoreHelper;
+var OdfAnalysisRequest = require("./odf-analysis-request.js");
+var LogViewer = require("./odf-logs.js");
+//var Notifications = require("./odf-notifications.js");
+var NewAnalysisRequestButton = OdfAnalysisRequest.NewAnalysisRequestButton;
+var NewAnalysisRequestDialog = OdfAnalysisRequest.NewAnalysisRequestDialog;
+var NewCreateAnnotationsButton = OdfAnalysisRequest.NewCreateAnnotationsButton;
+var NewCreateAnnotationsDialog = OdfAnalysisRequest.NewCreateAnnotationsDialog;
+
+var Button = ReactBootstrap.Button;
+var Nav = ReactBootstrap.Nav;
+var NavItem = ReactBootstrap.NavItem;
+var Navbar = ReactBootstrap.Navbar;
+var NavDropdown = ReactBootstrap.NavDropdown;
+var MenuItem = ReactBootstrap.MenuItem;
+var Jumbotron = ReactBootstrap.Jumbotron;
+var Grid = ReactBootstrap.Grid;
+var Row = ReactBootstrap.Row;
+var Col = ReactBootstrap.Col;
+var Table = ReactBootstrap.Table;
+var Modal = ReactBootstrap.Modal;
+var Input = ReactBootstrap.Input;
+var Alert = ReactBootstrap.Alert;
+var Panel = ReactBootstrap.Panel;
+var Label = ReactBootstrap.Label;
+var ProgressBar = ReactBootstrap.ProgressBar;
+var Image = ReactBootstrap.Image;
+var ListGroup = ReactBootstrap.ListGroup;
+var ListGroupItem = ReactBootstrap.ListGroupItem;
+var Tabs = ReactBootstrap.Tabs;
+var Tab = ReactBootstrap.Tab;
+var Glyphicon = ReactBootstrap.Glyphicon;
+
+var PerServiceStatusGraph = ODFStats.PerServiceStatusGraph;
+var TotalAnalysisGraph = ODFStats.TotalAnalysisGraph;
+var SystemDiagnostics = ODFStats.SystemDiagnostics;
+
+////////////////////////////////////////////////////////////////
+// toplevel navigation bar
+
+const constants_ODFNavBar = {
+  gettingStarted: "navKeyGettingStarted",
+  configuration: "navKeyConfiguration",
+  monitor: "navKeyMonitor",
+  discoveryServices: "navKeyDiscoveryServices",
+  data: "navKeyData",
+  analysis: "navKeyAnalysis"
+}
+
+var ODFNavBar = React.createClass({
+   render: function() {
+       return (
+         <Navbar inverse>
+           <Navbar.Header>
+             <Navbar.Brand>
+               <b>Open Discovery Framework</b>
+             </Navbar.Brand>
+             <Navbar.Toggle />
+           </Navbar.Header>
+           <Navbar.Collapse>
+             <Nav pullRight activeKey={this.props.activeKey} onSelect={this.props.selectCallback}>
+               <NavItem eventKey={constants_ODFNavBar.gettingStarted} href="#">Getting Started</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.monitor} href="#">System Monitor</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.configuration} href="#">Settings</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.discoveryServices} href="#">Services</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.data} href="#">Data sets</NavItem>
+               <NavItem eventKey={constants_ODFNavBar.analysis} href="#">Analysis</NavItem>
+             </Nav>
+           </Navbar.Collapse>
+         </Navbar>
+       );
+   }
+});
+
+
+
+/////////////////////////////////////////////////////////////////////////////////////////
+// Configuration page
+
+var ConfigurationPage = React.createClass({
+  componentWillMount() {
+      this.props.alertCallback({type: ""});
+  },
+
+  render: function() {
+    return (
+    <div className="jumbotron">
+      <Tabs position="left" defaultActiveKey={1}>
+        <Tab eventKey={1} title="General">
+          <ODFSettings.ODFConfigPage alertCallback={this.props.alertCallback}/>
+        </Tab>
+        <Tab eventKey={2} title="Spark settings">
+          <ODFSettings.SparkConfigPage alertCallback={this.props.alertCallback}/>
+        </Tab>
+        <Tab eventKey={3} title="User-defined">
+          <ODFSettings.UserDefinedConfigPage alertCallback={this.props.alertCallback}/>
+        </Tab>
+      </Tabs>
+      </div>
+      );
+  }
+
+});
+
+const GettingStartedPage = React.createClass({
+  getInitialState() {
+     return ({version: "NOTFOUND"});
+  },
+
+  componentWillMount() {
+     this.props.alertCallback({type: ""});
+     $.ajax({
+         url: ODFGlobals.engineUrl + "/version",
+         type: 'GET',
+         success: function(data) {
+             this.setState(data);
+         }.bind(this)
+       });
+  },
+
+  render: function() {
+    var divStyle = {
+      marginLeft: "80px",
+      marginRight: "80px"
+    };
+    return (
+      <Jumbotron>
+      <div style={divStyle}>
+         <h2>Welcome to the Open Discovery Framework Console</h2>
+         <p/>The "Open Discovery Framework" (ODF) is an open metadata-based platform
+         that strives to be a common home for different analytics technologies
+         that discover characteristics of data sets and relationships between
+         them (think "AppStore for discovery algorithms").
+         Using ODF, applications can leverage new discovery algorithms and their
+         results with minimal integration effort.
+         <p/>
+         This console lets you administer and configure your ODF system, as well as
+         run analyses and browse their results.
+         <p/>
+         <p><Button target="_blank" href="doc" bsStyle="primary">Open Documentation</Button></p>
+         <p><Button target="_blank" href="swagger" bsStyle="success">Show API Reference</Button></p>
+         <p/>
+		 Version: {this.state.version}
+         </div>
+       </Jumbotron>
+
+      )
+  }
+
+});
+
+/////////////////////////////////////////////////////////////////////
+// monitor page
+var StatusGraphs = React.createClass({
+
+	selectTab : function(key){
+		this.setState({key});
+	},
+
+	getInitialState() {
+	    return {
+	      key: "system_state"
+	    };
+	 },
+
+	render : function() {
+		var divStyle = {
+		     marginLeft: "20px"
+	    };
+
+		return (
+			<div>
+				<Tabs position="left" activeKey={this.state.key} onSelect={this.selectTab}>
+					<Tab eventKey={"system_state"} title="System state">
+						<div style={divStyle}>
+							<TotalAnalysisGraph visible={this.state.key == "system_state"} alertCallback={this.props.alertCallback}/>
+							<PerServiceStatusGraph visible={this.state.key == "system_state"} alertCallback={this.props.alertCallback}/>
+						</div>
+					</Tab>
+				    <Tab eventKey={"diagnostics"} title="Diagnostics">
+						<div style={divStyle}>
+							<SystemDiagnostics visible={this.state.key == "diagnostics"} alertCallback={this.props.alertCallback}/>
+						</div>
+					</Tab>
+					<Tab eventKey={"logs"} title="System logs">
+						<div style={divStyle}>
+							<LogViewer visible={this.state.key == "logs"} alertCallback={this.props.alertCallback}/>
+						</div>
+					</Tab>
+				</Tabs>
+			</div>
+         );
+	}
+
+
+});
+
+var MonitorPage = React.createClass({
+	mixins : [AJAXCleanupMixin],
+
+	getInitialState() {
+		return ( {
+				monitorStatusVisible: false,
+				monitorStatusStyle:"success",
+				monitorStatusMessage: "OK",
+				monitorWorkInProgress: false
+		});
+	},
+
+	componentWillMount() {
+	   this.props.alertCallback({type: ""});
+	},
+
+	checkHealth() {
+		this.setState({monitorWorkInProgress: true, monitorStatusVisible: false});
+	    var url = ODFGlobals.engineUrl + "/health";
+		var req = $.ajax({
+	         url: url,
+	         dataType: 'json',
+	         type: 'GET',
+	         success: function(data) {
+	        	 var status = data.status;
+	        	 var newState = {
+	        		monitorStatusVisible: true,
+	        		monitorWorkInProgress: false
+	        	 };
+
+	        	 if (status == "OK") {
+	        		 newState.monitorStatusStyle = "success";
+	        	 } else if (status == "WARNING") {
+	        		 newState.monitorStatusStyle = "warning";
+	        	 } else if (status == "ERROR") {
+	        		 newState.monitorStatusStyle = "danger";
+	        	 }
+	        	 // TODO show more than just the first message
+        		 newState.monitorStatusMessage = "Status: " + status + ". " + data.messages[0];
+
+	        	 this.setState(newState);
+	         }.bind(this),
+	         error: function(xhr, status, err) {
+	      	   if(this.isMounted()){
+	      		   this.setState({
+	        	   monitorStatusVisible: true,
+	        	   monitorStatusStyle:"danger",
+	        	   monitorStatusMessage: "An error occured: " + err.toString(),
+	        	   monitorWorkInProgress: false});
+	      	   }
+	         }.bind(this)
+	        });
+		this.storeAbort(req.abort);
+	},
+
+	performRestart : function(){
+		$.ajax({
+		      url: ODFGlobals.engineUrl + "/shutdown",
+		      contentType: "application/json",
+		      type: 'POST',
+		      data: JSON.stringify({restart: "true"}),
+		      success: function(data) {
+		  			this.setState({monitorStatusVisible : true, monitorStatusStyle: "info", monitorStatusMessage: "Restart in progress..."});
+		      }.bind(this),
+		      error: function(xhr, status, err) {
+		  			this.setState({monitorStatusVisible : true, monitorStatusStyle: "warning", monitorStatusMessage: "Restart request failed"});
+		      }.bind(this)
+		    });
+	},
+
+	render() {
+	  var divStyle = {
+		      marginLeft: "20px"
+		    };
+	  var monitorStatus = null;
+	  if (this.state.monitorStatusVisible) {
+		  monitorStatus = <Alert bsStyle={this.state.monitorStatusStyle}>{this.state.monitorStatusMessage}</Alert>;
+	  }
+	  var progressIndicator = null;
+	  if (this.state.monitorWorkInProgress) {
+		  progressIndicator = <Image src="img/lg_proc.gif" rounded />;
+	  }
+	  return (
+	    	<div className="jumbotron">
+	    	<h3>System health</h3>
+	    	  <div style={divStyle}>
+	           	<Button className="btn-raised" bsStyle="primary" disabled={this.state.monitorWorkInProgress} onClick={this.checkHealth}>Check health</Button>
+	           	<Button className="btn-raised" bsStyle="warning" onClick={this.performRestart}>Restart ODF</Button>
+	           	{progressIndicator}
+	           	{monitorStatus}
+	           	<hr/>
+	           	<div>
+	           	</div>
+	           	<StatusGraphs alertCallback={this.props.alertCallback}/>
+	    	  </div>
+	    	</div>
+	  );
+	}
+
+});
+
+//////////////////////////////////////////////////////
+// discovery services page
+var DiscoveryServicesPage = React.createClass({
+  mixins : [AJAXCleanupMixin],
+
+  getInitialState() {
+	  return ({discoveryServices: []});
+  },
+
+  loadDiscoveryServices() {
+	  // clear alert
+    this.props.alertCallback({type: "", message: ""});
+
+	var req = $.ajax({
+	    url: ODFGlobals.servicesUrl,
+	    dataType: 'json',
+	    type: 'GET',
+	    success: function(data) {
+	       this.setState({discoveryServices: data});
+	    }.bind(this),
+	    error: function(xhr, status, err) {
+    	   if(this.isMounted()){
+    		   var msg = "Error while reading ODF services: " + err.toString();
+    		   this.props.alertCallback({type: "danger", message: msg});
+    	   }
+	    }.bind(this)
+	  });
+
+	this.storeAbort(req.abort);
+  },
+
+  componentDidMount() {
+	  this.loadDiscoveryServices();
+  },
+
+  render: function() {
+	var services = $.map(
+        this.state.discoveryServices,
+        function(dsreg) {
+          return <tr key={dsreg.id}>
+                  <td>
+                     <ODFServices.DiscoveryServiceInfo dsreg={dsreg} refreshCallback={this.loadDiscoveryServices} alertCallback={this.props.alertCallback}/>
+                  </td>
+                 </tr>
+        }.bind(this)
+    );
+
+	return (
+	     <div className="jumbotron">
+           <h3>Services</h3>
+           This page lets you manage the services for this ODF instance.
+           You can add services manually by clicking the <em>Add Service</em> button or
+           register remote services (e.g. deployed on Bluemix) that you have built with the ODF service developer kit by
+           clicking the <em>Register remote services</em> link.
+           <p/>
+					 <ODFServices.AddDiscoveryServiceButton refreshCallback={this.loadDiscoveryServices}/>
+           <p/>
+	       	<Table bordered responsive>
+	         <tbody>
+	         {services}
+             </tbody>
+          </Table>
+	     </div>
+	);
+  }
+
+});
+
+//////////////////////////////////////////////////////////////
+// Analysis Page
+var AnalysisRequestsPage = React.createClass({
+  mixins : [AJAXCleanupMixin],
+
+  getInitialState() {
+      return {recentAnalysisRequests: null, config: {}, services : []};
+  },
+
+  componentWillReceiveProps : function(nextProps){
+  	var selection = null;
+	if(nextProps.navAddition && nextProps.navAddition.length > 0 && nextProps.navAddition[0] && nextProps.navAddition[0].length > 0){
+		var jsonAddition = {};
+
+		try{
+			jsonAddition = JSON.parse(decodeURIComponent(nextProps.navAddition[0]));
+		}catch(e){
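+			// a malformed hash addition is ignored and simply yields no selection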
+
+		}
+
+		if(jsonAddition.requestId){
+			$.each(this.state.recentAnalysisRequests || [], function(key, tracker){
+				var reqId = jsonAddition.requestId;
+
+				if(tracker.request.id == reqId){
+					selection = reqId;
+				}
+			}.bind(this));
+		}else if(jsonAddition.id && jsonAddition.repositoryId){
+			selection = jsonAddition;
+		}
+	}
+
+	if(selection != this.state.selection){
+		this.setState({selection : selection});
+	}
+  },
+
+  componentDidMount() {
+	  if(!this.refreshInterval){
+		  this.refreshInterval = window.setInterval(this.refreshAnalysisRequests, 5000);
+	  }
+      this.initialLoadServices();
+      this.initialLoadRecentAnalysisRequests();
+  },
+
+  componentWillUnmount : function() {
+	  if(this.refreshInterval){
+		  window.clearInterval(this.refreshInterval);
+	  }
+  },
+
+  getDiscoveryServiceNameFromId(id) {
+      var servicesWithSameId = this.state.services.filter(
+         function(dsreg) {
+             return dsreg.id == id;
+         }
+      );
+      if (servicesWithSameId.length > 0) {
+        return servicesWithSameId[0].name;
+      }
+      return null;
+  },
+
+  refreshAnalysisRequests : function(){
+	  var req = configurationStore.readConfig(
+		      function(config) {
+		          this.setState({config: config});
+		          const url = ODFGlobals.analysisUrl + "?offset=0&limit=20";
+		          $.ajax({
+		            url: url,
+		            dataType: 'json',
+		            type: 'GET',
+		            success: function(data) {
+		            	$.each(data.analysisRequestTrackers, function(key, tracker){
+		                	//collect service names by id and add to json so that it can be displayed later
+		            		$.each(tracker.discoveryServiceRequests, function(key, request){
+			            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
+			            		request.discoveryServiceName = serviceName;
+		            		}.bind(this));
+		            	}.bind(this));
+		                this.setState({recentAnalysisRequests: data.analysisRequestTrackers});
+		            }.bind(this),
+		            error: function(xhr, status, err) {
+		            	if(status != "abort" ){
+		            		console.error(url, status, err.toString());
+		            	}
+		            	if(this.isMounted()){
+		            	  var msg = "Error while refreshing recent analysis requests: " + err.toString();
+		            	  this.props.alertCallback({type: "danger", message: msg});
+		            	}
+		            }.bind(this)
+		          });
+		      }.bind(this),
+	      this.props.alertCallback
+	    );
+
+	    this.storeAbort(req.abort);
+  },
+
+  initialLoadServices() {
+	this.setState({services: null});
+
+    var req = servicesStore.getServices(
+      function(services) {
+          this.setState({services: services});
+      }.bind(this),
+      this.props.alertCallback
+    );
+
+    this.storeAbort(req.abort);
+  },
+
+  initialLoadRecentAnalysisRequests() {
+	this.setState({recentAnalysisRequests: null});
+
+    var req = configurationStore.readConfig(
+      function(config) {
+          this.setState({config: config});
+          const url = ODFGlobals.analysisUrl + "?offset=0&limit=20";
+          $.ajax({
+            url: url,
+            dataType: 'json',
+            type: 'GET',
+            success: function(data) {
+            	var selection = null;
+            	$.each(data.analysisRequestTrackers, function(key, tracker){
+            		if(this.props.navAddition && this.props.navAddition.length > 0 && this.props.navAddition[0].length > 0){
+            			var reqId = "";
+            			try{
+            				reqId = JSON.parse(decodeURIComponent(this.props.navAddition[0])).requestId
+            			}catch(e){
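+            				// ignore malformed hash additions; reqId stays empty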
+
+            			}
+            			if(tracker.request.id == reqId){
+            				selection = reqId;
+            			}
+        			}
+
+                	//collect service names by id and add to json so that it can be displayed later
+            		$.each(tracker.discoveryServiceRequests, function(key, request){
+	            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
+	            		request.discoveryServiceName = serviceName;
+            		}.bind(this));
+            	}.bind(this));
+
+            	var newState = {recentAnalysisRequests: data.analysisRequestTrackers};
+            	if(selection){
+            		newState.selection = selection;
+            	}
+
+               this.setState(newState);
+            }.bind(this),
+            error: function(xhr, status, err) {
+            	if(status != "abort" ){
+            		console.error(url, status, err.toString());
+            	}
+            	if(this.isMounted()){
+            	  var msg = "Error while loading recent analysis requests: " + err.toString();
+            	  this.props.alertCallback({type: "danger", message: msg});
+            	}
+            }.bind(this)
+          });
+      }.bind(this),
+      this.props.alertCallback
+    );
+
+    this.storeAbort(req.abort);
+  },
+
+  cancelAnalysisRequest(tracker) {
+      var url = ODFGlobals.analysisUrl + "/" + tracker.request.id + "/cancel";
+
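+      // POST to <analysisUrl>/<id>/cancel; judging from the error handler below, the
+      // server answers "Forbidden" for analyses that have already started and
+      // "Bad Request" for unknown request ids.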
+      $.ajax({
+          url: url,
+          type: 'POST',
+          success: function() {
+			  if(this.isMounted()){
+				  this.refreshAnalysisRequests();
+			  }
+          }.bind(this),
+          error: function(xhr, status, err) {
+        	  if(status != "abort" ){
+          		console.error(url, status, err.toString());
+        	  }
+
+        	  var errMsg = null;
+        	  if(err == "Forbidden"){
+        		  errMsg = "only analyses that have not been started yet can be cancelled!";
+        	  }else if(err == "Bad Request"){
+        		  errMsg = "the requested analysis could not be found!";
+        	  }
+        	  if(this.isMounted()){
+				  var msg = "Analysis could not be cancelled: " + (errMsg ? errMsg : err.toString());
+				  if(this.props.alertCallback){
+					  this.props.alertCallback({type: "danger", message: msg});
+				  }
+        	  }
+          }.bind(this)
+      });
+  },
+
+  viewResultAnnotations : function(target){
+	  this.setState({
+			resultAnnotations : null,
+			resultTarget : target.request.id,
+			showAnnotations: true
+		});
+	  var req = AnnotationStoreHelper.loadAnnotationsForRequest(target.request.id,
+			function(data){
+				this.setState({
+					resultAnnotations : data.annotations
+				});
+			}.bind(this),
+			function(error){
+				console.error('Annotations could not be loaded: ' + error);
+			}
+		);
+	  this.storeAbort(req.abort);
+  },
+
+  viewInAtlas : function(target){
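+	  // the repositoryId is expected to look like "atlas:<atlas-ui-url>"; strip the prefix to build the Atlas search link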
+	  var repo =  target.request.dataSets[0].repositoryId;
+	  repo = repo.split("atlas:")[1];
+      var annotationQueryUrl = repo + "/#!/search?query=from%20ODFAnnotation%20where%20analysisRun%3D'"+ target.request.id + "'";
+	  var win = window.open(annotationQueryUrl, '_blank');
+  },
+
+  render : function() {
+    var loadingImg = null;
+    if(this.state.recentAnalysisRequests == null){
+    	loadingImg = <Image src="img/lg_proc.gif" rounded />;
+    }
+    var requestActions = [
+                           {
+                        	   assetType: ["requests"],
+                        	   actions : [
+                	              {
+                	            	  label: "Cancel analysis",
+                	            	  func: this.cancelAnalysisRequest,
+                	            	  filter: function(obj){
+                	            		  var val = obj.status;
+                	            		  if (val == "INITIALIZED" || val == "IN_DISCOVERY_SERVICE_QUEUE") {
+                	            			  return true;
+                	            		  }
+                	            		  return false;
+                	            	  }
+                	              },
+                	              {
+                	            	  label: "View results",
+                	            	  func: this.viewResultAnnotations
+                	              },
+                	              {
+                	            	  label: "View results in atlas",
+                	            	  func: this.viewInAtlas
+                	              }
+                	           ]
+                           	}
+                           ];
+    return (
+    		<div className="jumbotron">
+			   <h3>Analysis requests</h3>
+			   <div>
+		        Click Refresh to refresh the list of existing analysis requests.
+		        Only the last 20 valid requests are shown.
+		         <p/>
+		        <NewAnalysisRequestButton bsStyle="primary" onClose={this.refreshAnalysisRequests} alertCallback={this.props.alertCallback}/>
+		        <NewCreateAnnotationsButton bsStyle="primary" onClose={this.refreshAnalysisRequests} alertCallback={this.props.alertCallback}/>
+
+		        <Button bsStyle="success" onClick={this.refreshAnalysisRequests}>Refresh</Button> &nbsp;
+            	{loadingImg}
+		        <ODFRequestBrowser registeredServices={this.state.config.registeredServices} actions={requestActions} ref="requestBrowser" selection={this.state.selection} assets={this.state.recentAnalysisRequests}/>
+		       </div>
+            	<Modal show={this.state.showAnnotations} onHide={function(){this.setState({showAnnotations : false})}.bind(this)}>
+	            	<Modal.Header closeButton>
+	                	<Modal.Title>Analysis results for analysis {this.state.resultTarget}</Modal.Title>
+		             </Modal.Header>
+		             <Modal.Body>
+		             	<ODFBrowser ref="resultBrowser" type={"annotations"} assets={this.state.resultAnnotations} />
+		             </Modal.Body>
+		             <Modal.Footer>
+		            <Button onClick={function(){this.setState({showAnnotations : false})}.bind(this)}>Close</Button>
+		            </Modal.Footer>
+            	</Modal>
+		    </div>
+    );
+  }
+
+});
+
+var AnalysisDataSetsPage = React.createClass({
+  mixins : [AJAXCleanupMixin],
+
+  componentDidMount() {
+      this.loadDataFiles();
+      this.loadTables();
+      this.loadDocuments();
+  },
+
+  getInitialState() {
+      return ({	showDataFiles: true,
+    	  		showHideDataFilesIcon: "chevron-up",
+    	  		showTables: true,
+    	  		showHideTablesIcon: "chevron-up",
+    	  		showDocuments: true,
+    	  		showHideDocumentsIcon: "chevron-up",
+    	  		config: null});
+  },
+
+  componentWillReceiveProps : function(nextProps){
+	if(nextProps.navAddition && nextProps.navAddition.length > 0 && nextProps.navAddition[0]){
+		this.setState({selection : nextProps.navAddition[0]});
+	}else{
+		this.setState({selection : null});
+	}
+  },
+
+  showHideDataFiles() {
+	  this.setState({showDataFiles: !this.state.showDataFiles, showHideDataFilesIcon: (!this.state.showDataFiles? "chevron-up" : "chevron-down")});
+  },
+
+  showHideTables() {
+	  this.setState({showTables: !this.state.showTables, showHideTablesIcon: (!this.state.showTables? "chevron-up" : "chevron-down")});
+  },
+
+  showHideDocuments() {
+	  this.setState({showDocuments: !this.state.showDocuments, showHideDocumentsIcon: (!this.state.showDocuments ? "chevron-up" : "chevron-down")});
+  },
+
+  createAnnotations : function(target){
+		this.setState({showCreateAnnotationsDialog: true, selectedAsset : target.reference.id});
+  },
+
+  startAnalysis : function(target){
+		this.setState({showAnalysisRequestDialog: true, selectedAsset : target.reference.id});
+  },
+
+  viewInAtlas : function(target){
+	  var win = window.open(target.reference.url, '_blank');
+	  win.focus();
+  },
+
+  loadDataFiles : function(){
+	  var  resultQuery = "from DataFile";
+	  this.setState({
+			dataFileAssets : null
+	  });
+	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
+			function(data){
+				this.setState({
+					dataFileAssets : data
+				});
+			}.bind(this),
+			function(error){
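+				// errors are silently ignored; the list keeps showing its loading indicator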
+
+			}
+		);
+	  this.storeAbort(req.abort);
+  },
+
+  loadTables : function(){
+	  var  resultQuery = "from Table";
+	  this.setState({
+			tableAssets : null
+	  });
+	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
+			function(data){
+				this.setState({
+					tableAssets : data
+				});
+			}.bind(this),
+			function(error){
+
+			}
+		);
+	  this.storeAbort(req.abort);
+  },
+
+  loadDocuments : function(){
+	  var  resultQuery = "from Document";
+	  this.setState({
+			docAssets : null
+	  });
+	  var req = AtlasHelper.searchAtlasMetadata(resultQuery,
+			function(data){
+				this.setState({
+					docAssets : data
+				});
+			}.bind(this),
+			function(error){
+
+			}
+		);
+	  this.storeAbort(req.abort);
+  },
+
+  render() {
+    var actions = [
+             {
+        	   assetType: ["DataFiles", "Tables", "Documents"],
+        	   actions : [
+	              {
+	            	  label: "Start analysis (annotation types)",
+	            	  func: this.createAnnotations
+	              } ,
+	              {
+	            	  label: "Start analysis (service sequence)",
+	            	  func: this.startAnalysis
+	              } ,
+	              {
+	            	  label: "View in atlas",
+	            	  func: this.viewInAtlas
+	              }
+        	    ]
+	         }
+	     ];
+
+    return (
+    		<div className="jumbotron">
+    		   <h3>Data sets</h3>
+		       <div>
+		       	 <NewAnalysisRequestDialog alertCallback={this.props.alertCallback} dataSetId={this.state.selectedAsset} show={this.state.showAnalysisRequestDialog} onClose={function(){this.setState({showAnalysisRequestDialog: false});}.bind(this)} />
+		       	 <NewCreateAnnotationsDialog alertCallback={this.props.alertCallback} dataSetId={this.state.selectedAsset} show={this.state.showCreateAnnotationsDialog} onClose={function(){this.setState({showCreateAnnotationsDialog: false});}.bind(this)} />
+		         These are all data sets in the metadata repository that are available for analysis.
+		         <p/>
+		         <Panel collapsible expanded={this.state.showDataFiles} header={
+		        		 <div style={{textAlign:"right"}}>
+				         	<span style={{float: "left"}}>Data Files</span>
+				         	<Button bsStyle="primary" onClick={function(){this.loadDataFiles();}.bind(this)}>
+				         		Refresh
+				         	</Button>
+			         		<Button onClick={this.showHideDataFiles}>
+			         			<Glyphicon glyph={this.state.showHideDataFilesIcon} />
+			         		</Button>
+			         	</div>}>
+		            	<ODFBrowser ref="dataFileBrowser" type={"DataFiles"} selection={this.state.selection} actions={actions} assets={this.state.dataFileAssets} />
+		         </Panel>
+		         <Panel collapsible expanded={this.state.showTables} header={
+		        		 <div style={{textAlign:"right"}}>
+				         	<span style={{float: "left"}}>Relational Tables</span>
+				         	<Button bsStyle="primary" onClick={function(){this.loadTables();}.bind(this)}>
+				         		Refresh
+				         	</Button>
+			         		<Button onClick={this.showHideTables}>
+			         			<Glyphicon glyph={this.state.showHideTablesIcon} />
+			         		</Button>
+			         	</div>}>
+		            	<ODFBrowser ref="tableBrowser" type={"Tables"} actions={actions} assets={this.state.tableAssets} />
+		         </Panel>
+		         <Panel collapsible expanded={this.state.showDocuments}  header={
+		        		 <div style={{textAlign:"right"}}>
+		        		 	<span style={{float: "left"}}>Documents</span>
+		        		 	<Button bsStyle="primary" onClick={function(){this.loadDocuments();}.bind(this)}>
+		        		 		Refresh
+		        		 	</Button>
+		        		 	<Button onClick={this.showHideDocuments}>
+			         			<Glyphicon glyph={this.state.showHideDocumentsIcon} />
+			         		</Button>
+			         	</div>}>
+		     			<ODFBrowser ref="docBrowser" type={"Documents"} actions={actions} assets={this.state.docAssets}/>
+		         </Panel>
+		       </div>
+		    </div>
+		     );
+  }
+
+});
+
+
+////////////////////////////////////////////////////////////////////////
+// main component
+var ODFUI = React.createClass({
+
+   componentDidMount: function() {
+	   $(window).bind("hashchange", this.parseUrl);
+	   this.parseUrl();
+   },
+
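+   // The URL hash has the form "#<navBarKey>/<addition>/...": the first segment
+   // selects the nav bar target, the remaining segments are passed to the page
+   // as navAddition (e.g. an encoded {"requestId": ...} for the analysis page).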
+   parseUrl : function(){
+	  var target = constants_ODFNavBar.gettingStarted;
+	  var navAddition = null;
+	  var hash = document.location.hash;
+	  if(hash && hash.length > 1){
+		  hash = hash.split("#")[1];
+		  var split = hash.split("/");
+		  var navHash = split[0];
+		  if(split.length > 0){
+			  navAddition = split.slice(1);
+		  }
+		  if(constants_ODFNavBar[navHash]){
+			  target = constants_ODFNavBar[navHash];
+		  }
+	  }
+	  this.setState({
+		  activeNavBarItem: target,
+	      navAddition: navAddition}
+	  );
+  },
+
+  getInitialState: function() {
+	  return ({
+	      activeNavBarItem: constants_ODFNavBar.gettingStarted,
+	      navAddition: null,
+	      globalAlert: {
+	        type: "",
+	        message: ""
+	      }
+	  });
+  },
+
+  handleNavBarSelection: function(selection) {
+	  $.each(constants_ODFNavBar, function(key, ref){
+		  if(ref == selection){
+			  document.location.hash = key;
+		  }
+	  });
+    this.setState({ activeNavBarItem: selection });
+  },
+
+  handleAlert: function(alertInfo) {
+    this.setState({ globalAlert: alertInfo });
+  },
+
+  render: function() {
+    var alertComp = null;
+    if (this.state.globalAlert.type != "") {
+       alertComp = <Alert bsStyle={this.state.globalAlert.type}>{this.state.globalAlert.message}</Alert>;
+    }
+
+    var contentComponent = <GettingStartedPage alertCallback={this.handleAlert}/>;
+    if (this.state.activeNavBarItem == constants_ODFNavBar.configuration) {
+       contentComponent = <ConfigurationPage alertCallback={this.handleAlert}/>;
+    } else if (this.state.activeNavBarItem == constants_ODFNavBar.discoveryServices) {
+       contentComponent = <DiscoveryServicesPage alertCallback={this.handleAlert}/>;
+    } else if (this.state.activeNavBarItem == constants_ODFNavBar.monitor) {
+       contentComponent = <MonitorPage alertCallback={this.handleAlert}/>;
+    } else if (this.state.activeNavBarItem == constants_ODFNavBar.analysis) {
+       contentComponent = <AnalysisRequestsPage navAddition={this.state.navAddition} alertCallback={this.handleAlert}/>;
+    } else if (this.state.activeNavBarItem == constants_ODFNavBar.data) {
+       contentComponent = <AnalysisDataSetsPage navAddition={this.state.navAddition} alertCallback={this.handleAlert}/>;
+    }
+
+    var divStyle = {
+      marginLeft: "80px",
+      marginRight: "80px"
+    };
+
+    return (
+        <div>
+           <ODFNavBar activeKey={this.state.activeNavBarItem} selectCallback={this.handleNavBarSelection}></ODFNavBar>
+           <div style={divStyle}>
+              {alertComp}
+              {contentComponent}
+           </div>
+        </div>
+    );
+  }
+});
+
+var div = $("#odf-toplevel-div")[0];
+ReactDOM.render(<ODFUI/>, div);
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-globals.js b/odf/odf-web/src/main/webapp/scripts/odf-globals.js
new file mode 100755
index 0000000..d67a2d3
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-globals.js
@@ -0,0 +1,54 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+
+const CONTEXT_ROOT = ""; // window.location.origin + "/" + (window.location.pathname.split("/")[1].length > 0 ? window.location.pathname.split("/")[1] + "/" : "");
+// assumption: the ODF REST API is rooted at "odf/api/v1/" relative to the context root
+const API_PATH = "odf/api/v1/";
+const API_PREFIX = CONTEXT_ROOT + API_PATH;
+const SERVICES_URL = API_PREFIX + "services";
+const ANALYSIS_URL = API_PREFIX + "analyses";
+const ENGINE_URL = API_PREFIX + "engine";
+const CONFIG_URL = API_PREFIX + "config";
+const METADATA_URL = API_PREFIX + "metadata";
+const IMPORT_URL = API_PREFIX + "import";
+const ANNOTATIONS_URL = API_PREFIX + "annotations";
+
+var OdfUrls = {
+	"contextRoot": CONTEXT_ROOT,
+	"apiPrefix": API_PREFIX,
+	"servicesUrl": SERVICES_URL,
+	"analysisUrl": ANALYSIS_URL,
+	"engineUrl": ENGINE_URL,
+	"configUrl": CONFIG_URL,
+	"metadataUrl": METADATA_URL,
+	"importUrl": IMPORT_URL,
+	"annotationsUrl": ANNOTATIONS_URL,
+
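+	// Resolve a dotted property path against a nested object, e.g.
+	// OdfUrls.getPathValue({a: {b: 42}}, "a.b") returns 42; a missing
+	// segment yields null.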
+	getPathValue: function(obj, path) {
+	    var value = obj;
+        $.each(path.split("."),
+            function(propKey, prop) {
+               // if value is null, do nothing
+               if (value) {
+                   if(value[prop] != null && value[prop] != undefined){
+                       value = value[prop];
+                   } else {
+                       value = null;
+                   }
+               }
+           }
+        );
+        return value;
+	}
+};
+
+module.exports = OdfUrls;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-logs.js b/odf/odf-web/src/main/webapp/scripts/odf-logs.js
new file mode 100755
index 0000000..ecca602
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-logs.js
@@ -0,0 +1,83 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ReactDOM = require("react-dom");
+var d3 = require("d3");
+var ReactBootstrap = require("react-bootstrap");
+var ReactD3 = require("react-d3-components");
+var ODFGlobals = require("./odf-globals.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var Input = ReactBootstrap.Input;
+
+var REFRESH_DELAY = 5000;
+
+var ODFLogViewer = React.createClass({
+	mixins : [AJAXCleanupMixin],
+
+	getInitialState : function(){
+		return {logLevel : "ALL", log : ""};
+	},
+
+	getLogs : function() {
+		const url = ODFGlobals.engineUrl + "/log?numberOfLogs=50&logLevel=" + this.state.logLevel;
+        var req = $.ajax({
+            url: url,
+            contentType: "text/plain",
+            type: 'GET',
+            success: function(data) {
+               this.setState({log: data});
+            }.bind(this),
+            error: function(xhr, status, err) {
+              var msg = "ODF log request failed, " + err.toString();
+              this.props.alertCallback({type: "danger", message: msg});
+            }.bind(this)
+        });
+
+        this.storeAbort(req.abort);
+	},
+
+	componentWillMount : function() {
+		this.getLogs();
+	},
+
+	componentWillUnmount () {
+	    this.refreshInterval && clearInterval(this.refreshInterval);
+	    this.refreshInterval = false;
+	},
+
+	componentWillReceiveProps: function(nextProps){
+		if(!nextProps.visible){
+			 this.refreshInterval && clearInterval(this.refreshInterval);
+			 this.refreshInterval = false;
+		}else if(!this.refreshInterval){
+			this.refreshInterval = window.setInterval(this.getLogs, REFRESH_DELAY);
+		}
+	},
+	render : function(){
+		return (<div>
+					<h4>ODF system logs</h4>
+					<h5>(This only works for the node this web application is running on; logs from other ODF nodes in a clustered environment will not be displayed.)</h5>
+					<Input label="Log level:" type="select" onChange={(el) => this.setState({logLevel : el.target.value}, this.getLogs)} value={this.state.logLevel}>
+					<option value="ALL">ALL</option>
+					<option value="FINE">FINE</option>
+					<option value="INFO">INFO</option>
+					<option value="WARNING">WARNING</option>
+				</Input>
+				<textarea disabled style={{width: '100%', height: '700px'}} value={this.state.log} /></div>);
+	}
+});
+
+module.exports = ODFLogViewer;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js b/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js
new file mode 100755
index 0000000..d7072dd
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-metadata-browser.js
@@ -0,0 +1,661 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ReactBootstrap = require("react-bootstrap");
+
+var Panel = ReactBootstrap.Panel;
+var Table = ReactBootstrap.Table;
+var Label = ReactBootstrap.Label;
+var Image = ReactBootstrap.Image;
+var Modal = ReactBootstrap.Modal;
+var Button = ReactBootstrap.Button;
+var FormControls = ReactBootstrap.FormControls;
+var ListGroup = ReactBootstrap.ListGroup;
+var ListGroupItem = ReactBootstrap.ListGroupItem;
+
+var ODFGlobals = require("./odf-globals.js");
+var UISpec = require("./odf-ui-spec.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var Utils = require("./odf-utils.js");
+var AtlasHelper = Utils.AtlasHelper;
+var URLHelper = Utils.URLHelper;
+
+var ODFBrowser = {
+
+	// Set the rowReferences property to an array of Atlas references {id : ..., repositoryId: ...}
+	// to have the rows fetched from the metadata store, or set the rowAssets property to an
+	// array of data objects that are displayed as-is.
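+	// Usage sketch (property values are illustrative only):
+	//   <ODFBrowser.ODFPagingTable rowReferences={[{id: "1234", repositoryId: "atlas:repo"}]} pageSize={10}/>
+	//   <ODFBrowser.ODFPagingTable rowAssets={alreadyLoadedAssets} onRowClick={this.showDetails}/>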
+	ODFPagingTable : React.createClass({
+
+		mixins : [AJAXCleanupMixin],
+
+		getInitialState : function(){
+			var pageSize = (this.props.pageSize ? this.props.pageSize : 5);
+			var rowReferences = this.props.rowReferences;
+			var rowAssets = this.props.rowAssets;
+			var max = (rowReferences ? rowReferences.length : (rowAssets ? rowAssets.length : 0));
+			var pageRows = (rowAssets ? rowAssets.slice(0, pageSize) : null);
+
+			return {
+				pageRows : pageRows,
+				pageSize : pageSize,
+				max : max,
+				tablePage : 0,
+				rowReferenceLoadingAborts : []
+			};
+		},
+
+		componentDidMount : function() {
+			if(this.props.rowReferences){
+				var pagerowReferences = this.props.rowReferences.slice(0, this.state.pageSize);
+				this.loadRows(pagerowReferences);
+			}
+		},
+
+		componentWillReceiveProps : function(nextProps){
+			if(!this.isMounted()){
+				return;
+			}
+			this.setStateFromProps(nextProps);
+		},
+
+		setStateFromProps : function(nextProps){
+			if(nextProps.rowReferences && !Utils.arraysEqual(this.props.rowReferences, nextProps.rowReferences)){
+				this.setState({max: nextProps.rowReferences.length, tablePage: 0});
+				var pagerowReferences = nextProps.rowReferences.slice(0, this.state.pageSize);
+				this.loadRows(pagerowReferences);
+			}else if(nextProps.rowAssets && !Utils.arraysEqual(this.props.rowAssets, nextProps.rowAssets)){
+				var rows = nextProps.rowAssets.slice(0, this.state.pageSize);
+				this.setState({pageRows : rows, max: nextProps.rowAssets.length, tablePage: 0});
+			}
+		},
+
+		getType : function(){
+			if(this.props.assetType){
+				return this.props.assetType;
+			}else if(this.state.pageRows && this.state.pageRows.length > 0 && this.state.pageRows[0].type){
+				return this.state.pageRows[0].type;
+			}
+		},
+
+		getUISpec : function(){
+			if(this.props.spec){
+				return this.props.spec;
+			}
+			return UISpec[this.getType()];
+		},
+
+		loadRows : function(rowReferences){
+			$.each(this.state.rowReferenceLoadingAborts, function(key, abort){
+				if(abort && abort.call){
+					abort.call();
+				}
+			});
+
+			this.setState({pageRows: [], rowReferenceLoadingAborts: []});
+
+			var reqs = AtlasHelper.loadAtlasAssets(rowReferences,
+				function(rowAsset){
+					var rowData = this.state.pageRows;
+					rowData.push(rowAsset);
+					if(this.isMounted()){
+						this.setState({pageRows : rowData});
+					}
+					if(rowReferences && rowData && rowData.length == rowReferences.length && this.props.onLoad){
+						this.props.onLoad(rowData);
+					}
+				}.bind(this),
+				function(err){
+
+				}
+			);
+			// collect the abort callbacks so that both a subsequent loadRows call
+			// (see the $.each above) and the AJAXCleanupMixin can cancel in-flight requests
+			var aborts = [];
+			$.each(reqs, function(key, val){
+				aborts.push(val.abort);
+			});
+			this.setState({rowReferenceLoadingAborts: aborts});
+
+			this.storeAbort(aborts);
+		},
+
+		previousPage : function(){
+			if(this.state.tablePage > 0){
+				var tablePage = this.state.tablePage - 1;
+				this.setState({tablePage : tablePage});
+				if(this.props.rowAssets){
+					var rows = this.props.rowAssets.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
+					this.setState({pageRows : rows});
+				}else if(this.props.rowReferences){
+					var rowRefs = this.props.rowReferences.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
+					this.loadRows(rowRefs);
+				}
+			}
+		},
+
+		nextPage : function(){
+			var max = this.state.max;
+			if((this.state.tablePage * this.state.pageSize) < max){
+				var tablePage = this.state.tablePage + 1;
+				this.setState({tablePage : tablePage});
+				if(this.props.rowAssets){
+					var rows = this.props.rowAssets.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
+					this.setState({pageRows : rows})
+				}else if(this.props.rowReferences){
+					var rows = this.props.rowReferences.slice(tablePage * this.state.pageSize, (tablePage + 1) * this.state.pageSize)
+					this.loadRows(rows);
+				}
+			}
+		},
+
+		sortAlphabetical : function(a, b){
+			if(this.getUISpec() && this.getUISpec().attributes){
+				var attrs = this.getUISpec().attributes;
+				var sortProp = null;
+				for(var no = 0; no < attrs.length; no++){
+					if(attrs[no].sort == true){
+						sortProp = attrs[no].key;
+						var aProp = a[sortProp].toLowerCase();
+						var bProp = b[sortProp].toLowerCase();
+						return ((aProp < bProp) ? -1 : ((aProp > bProp) ? 1 : 0));
+					}
+				}
+			}
+			return 0;
+		},
+
+		onRowClick : function(rowData){
+			if(this.props.onRowClick){
+				var type = this.getType();
+				if(type){
+					//new type is singular of list type ...
+					type = type.substring(0, type.length - 1);
+				}
+				this.props.onRowClick(rowData, type);
+			}
+		},
+
+		parseValue : function(value){
+		    if (value) {
+		        if(Array.isArray(value)){
+		            return value.length;
+		        }else if(typeof value === "object" && value.id && value.url && value.repositoryId){
+		            return <a href={value.url}>{value.id}</a>;
+		        }else if(typeof value === "object"){
+		            return JSON.stringify(value);
+		        }
+		    }
+			return value;
+		},
+
+		render : function(){
+			var loadingImg = <Image src="img/lg_proc.gif" rounded />;
+			var contentRows = [];
+			var pageIndicator = "(0-0)";
+			if(this.state.pageRows){
+				loadingImg = null;
+				this.state.pageRows.sort(this.sortAlphabetical);
+				$.each(this.state.pageRows, function(key, rowData){
+					var displayProperties = [];
+					var icon = null;
+					if(this.getUISpec()){
+						displayProperties = this.getUISpec().attributes;
+						if(this.getUISpec().icon){
+							icon = this.getUISpec().icon;
+						}
+					}else{
+						$.each(rowData, function(propName, val){
+							var label = propName;
+							if(label && label[0]){
+								label = label[0].toUpperCase() + label.slice(1);
+							}
+							displayProperties.push({key: propName, label: label});
+						});
+					}
+
+					var colCss = {};
+					if(this.props.actions){
+						colCss.paddingTop = "26px";
+					}
+					var columns = [<td style={colCss} key={"iconCol" + key}>{icon}</td>];
+					$.each(displayProperties,
+						function(key, propObj){
+							//properties can be a path such as prop1.prop2
+					        var value = ODFGlobals.getPathValue(rowData, propObj.key);
+
+							if(propObj.func){
+								value = propObj.func(value, rowData);
+							}else{
+								value = this.parseValue(value);
+							}
+
+							var col = <td style={colCss} key={propObj.key}>{value}</td>;
+							columns.push(col);
+						}.bind(this)
+					);
+
+					if(this.props.actions){
+						var btns = [];
+						$.each(this.props.actions, function(key, obj){
+							if(obj.assetType.indexOf(this.getType()) > -1){
+									$.each(obj.actions, function(actKey, action){
+										if((action.filter && action.filter(rowData)) || !action.filter){
+											var btn = <div key={actKey}><Button onClick={function(e){e.stopPropagation(); action.func(rowData);}}>{action.label}</Button><br/></div>;
+											btns.push(btn);
+										}
+									});
+							}
+						}.bind(this));
+						columns.push(<td key={"actionBtns"}>{btns}</td>);
+					}
+
+					var rowCss = {};
+					if(this.props.onRowClick){
+						rowCss.cursor = "pointer";
+					}
+
+					var row = <tr style={rowCss} onClick={function(){this.onRowClick(rowData);}.bind(this)} key={key}>
+								{columns}
+							  </tr>;
+					contentRows.push(row);
+				}.bind(this));
+
+				var max = this.state.max;
+				var min = (max > 0 ? (this.state.tablePage * this.state.pageSize + 1)  : 0);
+				pageIndicator = "(" + min + "-";
+				if((this.state.tablePage + 1) * this.state.pageSize >= max){
+					pageIndicator += max + ")";
+				}else{
+					pageIndicator += (this.state.tablePage + 1) * this.state.pageSize + ")";
+				}
+			}
+
+			var header = [];
+			var lbls = [""];
+
+			if(this.getUISpec()){
+				$.each(this.getUISpec().attributes, function(key, propObj){
+					lbls.push(propObj.label);
+				});
+			}else if(this.state.pageRows && this.state.pageRows.length > 0){
+				$.each(this.state.pageRows[0], function(key, val){
+					lbls.push(key[0].toUpperCase() + key.slice(1));
+				});
+			}
+			if(this.props.actions){
+				lbls.push("Actions");
+			}
+
+			$.each(lbls, function(key, val){
+					var headerCss = null;
+					if(val == "Actions"){
+						headerCss = {paddingLeft: "38px"};
+					}
+					var th = <th style={headerCss} key={key}>{val}</th>;
+					header.push(th);
+				});
+
+			return <div style={this.props.style}>
+					<div style={{minHeight:250}}>
+					<Table responsive>
+						<thead>
+							<tr>
+								{header}
+							</tr>
+						</thead>
+						<tbody>
+							{contentRows}
+						</tbody>
+					</Table>
+					</div>
+					<Button disabled={(this.state.pageRows==null || this.state.tablePage <= 0 )} onClick={this.previousPage}>previous</Button>
+					<span>
+						{pageIndicator}
+					</span>
+					<Button disabled={(this.state.pageRows==null || (this.state.tablePage + 1) * this.state.pageSize >= this.state.max)} onClick={this.nextPage}>next</Button>
+				</div>;
+		}
+	}),
+
+	ODFAssetDetails : React.createClass({
+
+		mixins : [AJAXCleanupMixin],
+
+		onHide : function(){
+			if(this.props.onHide){
+				this.props.onHide();
+			}
+			if(this.isMounted()){
+				this.setState({show: true});
+			}
+		},
+
+		getInitialState : function(){
+			return {
+					show : true
+					};
+		},
+
+		getType : function(){
+			if(this.props.assetType){
+				return this.props.assetType;
+			}else if(this.props.asset && this.props.asset.type){
+				return this.props.asset.type;
+			}
+			return null;
+		},
+
+		getUISpec : function(asset){
+			return UISpec[this.getType()];
+		},
+
+		getPropertiesByType : function(srcObject, uiSpecAttributes){
+			var properties = [];
+			var references = [];
+			var lists = [];
+			var objects = [];
+			if (uiSpecAttributes) {
+	            $.each(uiSpecAttributes, function(index, property){
+	                var value = ODFGlobals.getPathValue(srcObject, property.key);
+	                if (value) {
+	                    if(property.func){
+	                        value = property.func(value, srcObject);
+	                    }
+	                    var obj = property;
+	                    obj.value = value;
+	                    if(value && Array.isArray(value)){
+	                        lists.push(obj);
+	                    }else if(value && value.id && value.repositoryId){
+	                        references.push(obj);
+	                    } /*else if(typeof value === "object"){
+	                        objects.push(obj);
+	                    } */else{
+	                        properties.push(obj);
+	                    }
+	                }
+	            }.bind(this) );
+			}
+	        return {lists: lists, properties: properties, references: references, objects: objects};
+		},
+
+		sortPropsByLabelPosition : function(properties, uiSpecAttributes){
+			if(uiSpecAttributes){
+				properties.sort(function(val1, val2){
+					var index1 = -1;
+					var index2 = -1;
+					for(var no = 0; no < uiSpecAttributes.length; no++){
+						if(uiSpecAttributes[no].label == val1.label){
+							index1 = no;
+						}else if(uiSpecAttributes[no].label == val2.label){
+							index2 = no;
+						}
+						if(index1 != -1 && index2 != -1){
+							break;
+						}
+					}
+					if(index1 > index2){
+						return 1;
+					}else if(index1 < index2){
+						return -1;
+					}
+					return 0;
+				});
+			}
+		},
+
+		createPropertiesJSX : function(properties){
+			var props = [];
+			$.each(properties, function(key, val){
+				var value = val.value;
+				if(value){
+					var prop = <FormControls.Static key={key} label={val.label} standalone>{val.value}</FormControls.Static>
+					props.push(prop);
+				}
+			}.bind(this));
+			return props;
+		},
+
+		createReferenceJSX : function(references){
+			var refs = [];
+			$.each(references, function(key, val){
+				var prop = <a key={key} href={val.value.url}>{val.label}</a>
+				refs.push(prop);
+			}.bind(this));
+			return refs;
+		},
+
+		createObjectJSX : function(objects){
+			var objs = [];
+			$.each(objects, function(key, val){
+				var obj = <span key={key}>{JSON.stringify(val.value)}</span>;
+				objs.push(obj);
+			}.bind(this));
+
+			return objs;
+		},
+
+		createTableJSX : function(lists){
+			var tables = [];
+			$.each(lists, function(key, val){
+				var first = val.value[0];
+				var rowReferences = null;
+				var rowAssets = null;
+				if(first && first.id && first.repositoryId){
+					rowReferences = val.value;
+				}else{
+					rowAssets = val.value;
+				}
+
+				var spec = null;
+				var label = val.label.toLowerCase();
+				var type = label;
+				if(val.uiSpec){
+					spec = UISpec[val.uiSpec];
+				}else{
+					spec = UISpec[type];
+				}
+
+				var table = <div key={val.label + "_" + key}>
+								<h3>{val.label}</h3>
+								<ODFBrowser.ODFPagingTable rowAssets={rowAssets} assetType={type} rowReferences={rowReferences} onRowClick={this.props.onReferenceClick} spec={spec}/>
+							</div>;
+				tables.push(table);
+			}.bind(this));
+
+			return tables;
+		},
+
+		render : function(){
+			var loadingOverlay = <div style={{position:"absolute", width:"100%", height:"100%", left:"50%", top: "30%"}}><Image src="img/lg_proc.gif" rounded /></div>;
+			if(!this.props.loading){
+				loadingOverlay = null;
+			}
+
+			var tablesPanel = <Panel collapsible defaultExpanded={false} header="References">
+	          </Panel>;
+			var propertiesPanel = <Panel collapsible defaultExpanded={false} header="Properties">
+	          </Panel>;
+
+			if(this.props.asset){
+				var uiSpecAttrs = this.getUISpec(this.props.asset).attributes;
+				if(!uiSpecAttrs){
+					uiSpecAttrs = [];
+					$.each(this.props.asset, function(propName, val){
+						var label = propName;
+						if(label && label[0]){
+							label = label[0].toUpperCase() + label.slice(1);
+						}
+						uiSpecAttrs.push({key: propName, label: label});
+					});
+				}
+				var allProps = this.getPropertiesByType(this.props.asset, uiSpecAttrs);
+
+				var properties = allProps.properties;
+				var references = allProps.references;
+				var objects = allProps.objects;
+				var lists = allProps.lists;
+
+				var props = [];
+				var refs = [];
+				var objs = [];
+				var tables = [];
+
+				this.sortPropsByLabelPosition(properties, uiSpecAttrs);
+				props = this.createPropertiesJSX(properties);
+				refs = this.createReferenceJSX(references);
+				objs = this.createObjectJSX(objects);
+				tables = this.createTableJSX(lists);
+
+				if(props.length > 0 || refs.length > 0 || objs.length > 0){
+					propertiesPanel = <Panel collapsible defaultExpanded={true} header="Properties">
+							     		{props}
+							     		{refs}
+							     		{objs}
+							     	  </Panel>;
+				}
+
+				if(tables.length > 0){
+					tablesPanel = <Panel collapsible defaultExpanded={true} header="References">
+						     		{tables}
+						          </Panel>;
+				}
+			}
+
+			var icon = null;
+			if(this.getUISpec(this.props.asset) && this.getUISpec(this.props.asset).icon){
+				icon = this.getUISpec(this.props.asset).icon;
+			}
+
+		    var title = <span>{icon} Details</span>;
+		    if(this.props.asset && this.props.asset.reference){
+		          title = <div>{title} <a target="_blank" href={this.props.asset.reference.url}>( {this.props.asset.reference.id} )</a></div>;
+		    }
+			return <Modal show={this.props.show} onHide={this.onHide}>
+			        	<Modal.Header closeButton>
+			           <Modal.Title>{title}</Modal.Title>
+				        </Modal.Header>
+				        <Modal.Body>
+					    	{loadingOverlay}
+					        {propertiesPanel}
+					        {tablesPanel}
+						</Modal.Body>
+				       <Modal.Footer>
+				       <Button onClick={function(){this.onHide();}.bind(this)}>Close</Button>
+				       </Modal.Footer>
+					</Modal>
+		}
+
+	}),
+
+	// Atlas metadata browser: pass the assets to display in the assets property;
+	// an optional selection ({id: ..., repositoryId: ...}) is loaded from Atlas
+	// and shown in a details dialog.
+	ODFMetadataBrowser : React.createClass({
+
+		mixins : [AJAXCleanupMixin],
+
+		getInitialState : function() {
+			return ({
+				assets: null,
+				loadingAssetDetails: false
+				});
+		},
+
+		componentWillMount : function() {
+			if(this.props.selection){
+				this.loadSelectionFromAtlas(this.props.selection);
+			}
+		},
+
+		referenceClick: function(val, type){
+			if(!type || (type && val.type)){
+				type = val.type;
+			}
+			var selectedAsset = {id: val.reference.id, repositoryId: val.reference.repositoryId, type: type};
+			URLHelper.setUrlHash(selectedAsset);
+		},
+
+		loadSelectionFromAtlas : function(selection){
+			if(selection){
+				this.setState({showAssetDetails: true, loadingAssetDetails: true});
+				var sel = selection;
+				if(!sel.id){
+					sel = JSON.parse(decodeURIComponent(sel));
+				}
+
+				var loading = false;
+				if(sel.id && sel.repositoryId){
+				if(!this.state.assetDetails || !this.state.assetDetails.reference ||
+						this.state.assetDetails.reference.id != sel.id ||
+						this.state.assetDetails.reference.repositoryId != sel.repositoryId){
+						loading = true;
+						var req = AtlasHelper.loadAtlasAsset(sel,
+								function(data){
+									if(!data.type && sel.type){
+										data.type = sel.type;
+									}
+									var state = {
+											assetDetails: data,
+											loadingAssetDetails: false};
+									this.setState(state);
+								}.bind(this),
+								function(){
+
+								}
+						);
+						this.storeAbort(req.abort);
+					}
+				}
+				if(!loading && this.state.loadingAssetDetails){
+					this.setState({loadingAssetDetails: false});
+				}
+			}
+		},
+
+		componentWillReceiveProps : function(nextProps){
+			if(!this.isMounted()){
+				return;
+			}
+			var newState = {};
+			if(nextProps.selection && this.props.selection != nextProps.selection){
+				this.loadSelectionFromAtlas(nextProps.selection);
+			}else if(nextProps.selection == null){
+				newState.assetDetails = null;
+				newState.showAssetDetails = false;
+			}
+			this.setState(newState);
+		},
+
+		render : function(){
+			var loadingImg = null;
+			var list = null;
+			if(this.props.assets){
+				list = <ODFBrowser.ODFPagingTable actions={this.props.actions} rowAssets={this.props.assets} onRowClick={this.referenceClick} assetType={this.props.type}/>;
+			}else{
+				loadingImg = <Image src="img/lg_proc.gif" rounded />;
+			}
+
+			return <div>{list}
+						{loadingImg}
+						<ODFBrowser.ODFAssetDetails show={this.state.assetDetails != null || this.state.loadingAssetDetails} loading={this.state.loadingAssetDetails} key={(this.state.assetDetails ? this.state.assetDetails.id : "0")} onReferenceClick={this.referenceClick} asset={this.state.assetDetails} onHide={function(){URLHelper.setUrlHash(); this.setState({assetDetails : null})}.bind(this)} />
+					</div>;
+		}
+	})
+}
+
+module.exports = ODFBrowser;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-mixins.js b/odf/odf-web/src/main/webapp/scripts/odf-mixins.js
new file mode 100755
index 0000000..40c0aa9
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-mixins.js
@@ -0,0 +1,51 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+
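+// Collects the abort functions of a component's jQuery AJAX requests and invokes
+// them when the component unmounts, so no success/error callback fires afterwards.
+// Usage sketch: add mixins : [AJAXCleanupMixin], then call this.storeAbort(req.abort)
+// for every $.ajax request the component issues.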
+var AJAXCleanupMixin = {
+
+	componentWillMount: function() {
+		this.requestAborts = [];
+	},
+
+	storeAborts : function(aborts) {
+		if(Array.isArray(aborts)){
+			$.each(aborts, function(key, val){
+				this.storeAbort(val);
+			}.bind(this));
+		}
+	},
+
+	storeAbort : function(abort) {
+		if(Array.isArray(abort)){
+			$.each(abort, function(key, val){
+				this.requestAborts.push(val);
+			}.bind(this));
+		}else{
+			this.requestAborts.push(abort);
+		}
+	},
+
+	componentWillUnmount : function() {
+		$.each(this.requestAborts, function(key, val){
+			if(val && val.call){
+				val.call();
+			}
+		});
+	}
+};
+
+module.exports = AJAXCleanupMixin;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-notifications.js b/odf/odf-web/src/main/webapp/scripts/odf-notifications.js
new file mode 100755
index 0000000..a3b99ce
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-notifications.js
@@ -0,0 +1,171 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ReactDOM = require("react-dom");
+var d3 = require("d3");
+var ReactBootstrap = require("react-bootstrap");
+var ReactD3 = require("react-d3-components");
+var ODFGlobals = require("./odf-globals.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var LineChart = ReactD3.LineChart;
+var Input = ReactBootstrap.Input;
+var Image = ReactBootstrap.Image;
+
+var REFRESH_DELAY = 5000;
+
+var CurrentNotificationsGraph = React.createClass({
+
+	tooltipLine : function(label, data) {
+        return "Arrived notifications " + data.y;
+    },
+
+	render : function(){
+		var lineChart = null;
+
+		if(this.props.values){
+			var data = [
+			        {
+			        	label: 'Asset notifications',
+			            values: [ ]
+			         }
+			    ];
+
+			for(var no = 0; no < this.props.values.length; no++){
+				data[0].values.push({x : no + 1, y : this.props.values[no]});
+			};
+
+			lineChart = (<LineChart
+			                data={data}
+							width={400}
+			                height={400}
+			                margin={{top: 10, bottom: 50, left: 50, right: 10}}
+			                tooltipContained
+		                    tooltipHtml={this.tooltipLine}
+			                shapeColor={"red"}
+			 				xAxis={{tickValues: []}}
+							/>);
+		}
+
+		return (
+				<div>
+					<h4>Number of received notifications</h4>
+					<h5>(This only works for the node this web application is running on. In a clustered environment, notifications could be processed on another node and therefore not be visible here.)</h5>
+
+					{lineChart}
+				</div>);
+	}
+
+
+});
+
+var ODFNotificationsGraph = React.createClass({
+	mixins : [AJAXCleanupMixin],
+
+	getInitialState : function(){
+		return {notifications : [], notificationCount : [0]};
+	},
+
+	getNotifications : function(){
+		const url = ODFGlobals.metadataUrl + "/notifications?numberOfNotifications=50";
+        var req = $.ajax({
+            url: url,
+            contentType: "application/json",
+            type: 'GET',
+            success: function(data) {
+            	this.setState({notifications: data.notifications});
+            }.bind(this),
+            error: function(xhr, status, err) {
+              var msg = "ODF notification request failed, " + err.toString();
+              this.props.alertCallback({type: "danger", message: msg});
+            }.bind(this)
+        });
+
+        this.storeAbort(req.abort);
+	},
+
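+	// Polls the notification count and keeps a sliding window of the last 10
+	// samples for the graph; when the two most recent samples differ, the
+	// notification list itself is reloaded.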
+	getNotificationCount : function() {
+		const url = ODFGlobals.metadataUrl + "/notifications/count";
+        var req = $.ajax({
+            url: url,
+            contentType: "application/json",
+            type: 'GET',
+            success: function(data) {
+            	var current = this.state.notificationCount;
+            	if(!current){
+            		current = [];
+            	}else if(current.length > 1 && current[current.length - 1] != current[current.length - 2]){
+            		this.getNotifications();
+            	}
+            	if(current.length == 10){
+            		current.splice(0, 1);
+            	}
+            	current.push(data.notificationCount);
+               this.setState({notificationCount: current});
+            }.bind(this),
+            error: function(xhr, status, err) {
+              var msg = "ODF notification count request failed, " + err.toString();
+              this.props.alertCallback({type: "danger", message: msg});
+            }.bind(this)
+        });
+
+        this.storeAbort(req.abort);
+	},
+
+	componentWillMount : function() {
+		this.getNotifications();
+		this.getNotificationCount();
+	},
+
+	componentWillUnmount () {
+	    this.refreshInterval && clearInterval(this.refreshInterval);
+	    this.refreshInterval = false;
+	},
+
+	componentWillReceiveProps: function(nextProps){
+		if(!nextProps.visible){
+			 this.refreshInterval && clearInterval(this.refreshInterval);
+			 this.refreshInterval = false;
+		}else if(!this.refreshInterval){
+			this.refreshInterval = window.setInterval(this.getNotificationCount, REFRESH_DELAY);
+		}
+	},
+	render : function(){
+		var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
+
+		var notificationGraph = null;
+		if(this.state){
+			progressIndicator = null;
+			notificationGraph = <CurrentNotificationsGraph values={this.state.notificationCount} />;
+		}
+
+		var notificationsValue = "";
+		$.each(this.state.notifications, function(key, val){
+			notificationsValue +="\n";
+			notificationsValue += val.type + " , " + val.asset.repositoryId + " -- " + val.asset.id;
+		});
+
+		return (
+				<div>
+					{progressIndicator}
+					{notificationGraph}
+					<textarea disabled style={{width: '100%', height: '300px'}} value={notificationsValue} />
+				</div>);
+
+	}
+});
+
+module.exports = ODFNotificationsGraph;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js b/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js
new file mode 100755
index 0000000..55c053b
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-request-browser.js
@@ -0,0 +1,154 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ReactBootstrap = require("react-bootstrap");
+
+var ODFAssetDetails = require("./odf-metadata-browser.js").ODFAssetDetails;
+var ODFPagingTable = require("./odf-metadata-browser.js").ODFPagingTable;
+var ODFGlobals = require("./odf-globals.js");
+var AtlasHelper = require("./odf-utils.js").AtlasHelper;
+var URLHelper = require("./odf-utils.js").URLHelper;
+var AJAXCleanupMixin = require("./odf-mixins.js");
+
+var Image = ReactBootstrap.Image;
+
+var ODFRequestBrowser = React.createClass({
+
+	mixins : [AJAXCleanupMixin],
+
+	getInitialState : function(){
+		return {assetDetails : null, loadingAssetDetails: false};
+	},
+
+	getDiscoveryServiceNameFromId(id) {
+		if(!this.props.registeredServices){
+			return id;
+		}
+		var servicesWithSameId = this.props.registeredServices.filter(
+	         function(dsreg) {
+	             return dsreg.id == id;
+	         }
+		);
+		if (servicesWithSameId.length > 0) {
+			return servicesWithSameId[0].name;
+		}
+		return id;
+	},
+
+	loadSelectedRequestStatus: function(requestId){
+		if(requestId){
+			this.setState({showAssetDetails: true, loadingAssetDetails: true});
+
+			var req = $.ajax({
+	            url: ODFGlobals.analysisUrl + "/" + requestId,
+	            contentType: "application/json",
+	            dataType: 'json',
+	            type: 'GET',
+	            success: function(data) {
+	            	$.each(data.serviceRequests, function(key, request){
+	            		var serviceName = this.getDiscoveryServiceNameFromId(request.discoveryServiceId);
+	            		request.discoveryServiceName = serviceName;
+            		}.bind(this));
+
+	               this.setState({assetType: "request", assetDetails: data, loadingAssetDetails: false});
+	            }.bind(this),
+	            error: function(data){
+	            	this.setState({loadingAssetDetails: false});
+	            }.bind(this)
+	        });
+		    this.storeAbort(req.abort);
+
+			if(this.state.loadingAssetDetails){
+				this.setState({loadingAssetDetails: false});
+			}
+		}
+	},
+
+	loadSelectionFromAtlas : function(selection){
+		if(selection){
+			this.setState({showAssetDetails: true, loadingAssetDetails: true});
+			var sel = selection;
+			if(!sel.id){
+				sel = JSON.parse(decodeURIComponent(sel));
+			}
+
+			var loading = false;
+			if(sel.id && sel.repositoryId){
+				if(!this.state.assetDetails || !this.state.assetDetails.reference ||
+						this.state.assetDetails.reference.id != sel.id ||
+						this.state.assetDetails.reference.repositoryId != sel.repositoryId){
+						loading = true;
+						var req = AtlasHelper.loadAtlasAsset(sel,
+							function(data){
+								if(!data.type && sel.type){
+									data.type = sel.type;
+								}
+								var state = {
+										assetDetails: data, assetType: data.type, loadingAssetDetails: false};
+								this.setState(state);
+							}.bind(this),
+							function(){
+
+							}
+						);
+					    this.storeAbort(req.abort);
+				}
+			}
+
+			if(!loading && this.state.loadingAssetDetails){
+				this.setState({loadingAssetDetails: false});
+			}
+		}
+	},
+
+	componentWillReceiveProps : function(nextProps){
+		if(!this.isMounted()){
+			return;
+		}
+		var newState = {};
+		if((nextProps.selection && this.props.selection && this.props.selection.id != nextProps.selection.id) || (nextProps.selection && this.props.selection == null)){
+			if(nextProps.selection.id && nextProps.selection.repositoryId){
+				this.loadSelectionFromAtlas(nextProps.selection);
+			}else{
+				this.loadSelectedRequestStatus(nextProps.selection);
+			}
+		}else if(nextProps.selection == null){
+			newState.assetDetails = null;
+		}
+		this.setState(newState);
+	},
+
+	rowClick : function(val, type){
+		if(!type || val.type){
+			type = val.type;
+		}
+		if(val && val.reference && val.reference.id){
+			var selectedAsset = {id: val.reference.id, repositoryId: val.reference.repositoryId, type: type};
+			URLHelper.setUrlHash(JSON.stringify(selectedAsset));
+		}else if(val && val.request && val.request.id){
+			URLHelper.setUrlHash(JSON.stringify({requestId : val.request.id}));
+		}
+	},
+
+	render : function(){
+		return <div>
+					<ODFPagingTable actions={this.props.actions} rowAssets={this.props.assets} onRowClick={this.rowClick} assetType="requests"/>
+					<ODFAssetDetails show={this.state.assetDetails != null || this.state.loadingAssetDetails} loading={this.state.loadingAssetDetails} key={(this.state.assetDetails ? this.state.assetDetails.id : "0")} onReferenceClick={this.rowClick} asset={this.state.assetDetails} assetType={this.state.assetType} onHide={function(){URLHelper.setUrlHash(); this.setState({showAssetDetails : false})}.bind(this)} />
+				</div>;
+	}
+});
+
+module.exports = ODFRequestBrowser;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-services.js b/odf/odf-web/src/main/webapp/scripts/odf-services.js
new file mode 100755
index 0000000..cf5314b
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-services.js
@@ -0,0 +1,251 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//js imports
+var $ = require("jquery");
+var bootstrap = require("bootstrap");
+
+var React = require("react");
+var ReactDOM = require("react-dom");
+var LinkedStateMixin = require("react-addons-linked-state-mixin");
+var ReactBootstrap = require("react-bootstrap");
+
+var ODFGlobals = require("./odf-globals.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var configurationStore = require("./odf-utils.js").ConfigurationStore;
+var servicesStore = require("./odf-utils.js").ServicesStore;
+
+var Button = ReactBootstrap.Button;
+var Jumbotron = ReactBootstrap.Jumbotron;
+var Grid = ReactBootstrap.Grid;
+var Row = ReactBootstrap.Row;
+var Col = ReactBootstrap.Col;
+var Table = ReactBootstrap.Table;
+var Modal = ReactBootstrap.Modal;
+var Input = ReactBootstrap.Input;
+var Alert = ReactBootstrap.Alert;
+var Panel = ReactBootstrap.Panel;
+var Label = ReactBootstrap.Label;
+var Image = ReactBootstrap.Image;
+
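+// Shows one registered discovery service. The props.dsreg shape expected here,
+// inferred from the fields read below (values are illustrative only):
+//   { id: "my-service", name: "My Service", description: "...", protocol: "...",
+//     link: "http://example.com/docs", iconUrl: null,
+//     endpoint: { type: "Java", className: "MyServiceClass" } }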
+var DiscoveryServiceInfo = React.createClass({
+	mixins : [AJAXCleanupMixin],
+
+    testService() {
+        const url = ODFGlobals.servicesUrl + "/" + this.props.dsreg.id;
+        var req = $.ajax({
+            url: url,
+            contentType: "application/json",
+            dataType: 'json',
+            type: 'GET',
+            success: function(data) {
+                var type = "success";
+                if (data.status != "OK") {
+                    type = "danger";
+                }
+                var msg = "Status of ODF service '" + this.props.dsreg.name + "' is " + data.status + " (" + data.message + ")";
+                this.props.alertCallback({type: type, message: msg});
+            }.bind(this),
+            error: function(xhr, status, err) {
+            	if(status != "abort" ){
+            		console.error(url, status, err.toString());
+            	}
+         	    if(this.isMounted()){
+         	    	var msg = "Service test failed: " + status + ", " + err.toString();
+         	    	this.props.alertCallback({type: "danger", message: msg});
+         	    }
+            }.bind(this)
+        });
+
+        this.storeAbort(req.abort);
+    },
+
+    deleteService() {
+        const url = ODFGlobals.servicesUrl + "/" + this.props.dsreg.id;
+        $.ajax({
+            url: url,
+            type: 'DELETE',
+            success: function(data) {
+            	if(this.isMounted()){
+            		this.props.refreshCallback();
+            	}
+            }.bind(this),
+            error: function(xhr, status, err) {
+            	if(status != "abort" ){
+            		console.error(url, status, err.toString());
+            	}
+            	if(this.isMounted()){
+	              var msg = "Service could not be deleted: " + status + ", " + err.toString();
+	              this.props.alertCallback({type: "danger", message: msg});
+			  }
+			}.bind(this)
+        });
+    },
+
+	render() {
+    	var icon = "";
+    	var imgUrl = this.props.dsreg.iconUrl;
+    	// absolute http(s) URLs are used directly; anything else is resolved via the service's image endpoint.
+    	if(imgUrl != null && (imgUrl.trim().startsWith("http://") || imgUrl.trim().startsWith("https://"))){
+    		icon = imgUrl;
+    	}else{
+    		icon = ODFGlobals.servicesUrl + "/" + encodeURIComponent(this.props.dsreg.id) + "/image";
+    	}
+
+    	var endpointInfo = <span>No additional information</span>;
+    	if (this.props.dsreg.endpoint.type == "Java") {
+            endpointInfo = <span><em>Java class name</em>: {this.props.dsreg.endpoint.className}</span>;
+    	}
+		return (
+				<Grid>
+				  <Row className="show-grid">
+				    <Col sm={1}>
+		             <div >
+		               <Image src={icon} rounded/>
+	   	             </div>
+		            </Col>
+		            <Col sm={4}>
+	             	  <b>{this.props.dsreg.name}</b>
+	             	  <br/>
+	             	  {this.props.dsreg.description}
+	             	  <br/>
+	             	  <a href={this.props.dsreg.link} target="_blank">More</a>
+		              </Col>
+	             	<Col sm={5}>
+	             	  <em>Type</em>: {this.props.dsreg.endpoint.type}
+	       	          <br/>
+	       	          {endpointInfo}
+	       	          <br/>
+	       	          <em>ID</em>: {this.props.dsreg.id}
+	       	          <br/>
+	       	          <em>Protocol</em>: {this.props.dsreg.protocol}
+	             	</Col>
+	             	<Col sm={2}>
+	             	  <Button bsStyle="primary" onClick={this.testService}>Test</Button>
+	             	  <br/>
+	             	 <Button bsStyle="warning" onClick={this.deleteService}>Delete</Button>
+	             	</Col>
+		         </Row>
+		      </Grid>
+		);
+	}
+});
+
+var AddDiscoveryServiceButton = React.createClass({
+  mixins: [LinkedStateMixin, AJAXCleanupMixin],
+
+  getInitialState() {
+	  return({showModal: false, serviceEndpointType: "Spark", parallelismCount: 2, serviceInterfaceType: "DataFrame"});
+  },
+
+  open() {
+    this.setState({showModal: true, errorMessage: null});
+  },
+
+  close() {
+    this.setState({showModal: false});
+  },
+
+  addService() {
+
+	var newService = JSON.parse(JSON.stringify(this.state));
+	delete newService.showModal;
+	delete newService.errorMessage;
+
+	 var sparkEndpoint = {
+        jar: newService.serviceApplication,
+        className: newService.serviceClassName,
+        inputMethod: newService.serviceInterfaceType,
+        runtimeName: newService.serviceEndpointType
+	 };
+
+	newService.endpoint = sparkEndpoint;
+
+    delete newService.serviceEndpointType;
+    delete newService.serviceType;
+    delete newService.serviceApplication;
+    delete newService.serviceClassName;
+    delete newService.serviceInterfaceType;
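+
+    // The payload posted below should now have roughly this shape
+    // (field names follow the assembly above; values are illustrative only):
+    //   { id: "my-id", name: "My Service", description: "...", parallelismCount: 2,
+    //     iconUrl: "...", link: "...",
+    //     endpoint: { jar: "my-app.jar", className: "MyClass",
+    //                 inputMethod: "DataFrame", runtimeName: "Spark" } }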
+
+    $.ajax({
+      url: ODFGlobals.servicesUrl,
+      contentType: "application/json",
+      type: 'POST',
+      data: JSON.stringify(newService),
+      success: function(data) {
+		  if(this.isMounted()){
+			  this.close();
+			  this.props.refreshCallback();
+		  }
+      }.bind(this),
+      error: function(xhr, status, err) {
+		  if(this.isMounted()){
+			  var errorMsg = status;
+			  if(xhr.responseJSON && xhr.responseJSON.error){
+				  errorMsg = xhr.responseJSON.error;
+			  }
+			  var msg = "Service could not be added: " + errorMsg + ", " + err.toString();
+			  this.setState({errorMessage: msg});
+		  }
+      }.bind(this)
+    });
+  },
+
+  render() {
+    var alert = null;
+    if (this.state.errorMessage) {
+       alert = <Alert bsStyle="danger">{this.state.errorMessage}</Alert>;
+    }
+
+	var endpointInput = <div>
+						<Input type="text" valueLink={this.linkState("serviceApplication")} label="Application jar (or zip) file"/>
+						<Input type="text" valueLink={this.linkState("serviceClassName")} label="Class name"/>
+			            <Input type="select" valueLink={this.linkState("serviceInterfaceType")} label="Service interface type" placeholder="DataFrame">
+			                <option value="DataFrame">DataFrame</option>
+			                <option value="Generic">Generic</option>
+		                </Input>
+	            	</div>;
+
+	  return(
+				<span>
+				<Button bsStyle="primary" bsSize="large" onClick={this.open}>Add ODF Service</Button>
+				  <Modal show={this.state.showModal} onHide={this.close}>
+						<Modal.Header closeButton>
+						 	<Modal.Title>Add ODF Service</Modal.Title>
+						</Modal.Header>
+						<Modal.Body>
+						{alert}
+						  <Input type="text" ref="serviceName"  valueLink={this.linkState("name")} label="Name"/>
+							<Input type="text" valueLink={this.linkState("description")} label="Description"/>
+							<Input type="text" valueLink={this.linkState("id")} label="ID"/>
+							<Input type="number" valueLink={this.linkState("parallelismCount")} label="Allowed parallel requests"/>
+							<Input type="select" valueLink={this.linkState("serviceEndpointType")} label="Type" placeholder="Spark">
+			                	<option value="Spark">Spark</option>
+			                </Input>
+							{endpointInput}
+							<Input type="text" valueLink={this.linkState("iconUrl")} label="Icon (Optional)"/>
+							<Input type="text" valueLink={this.linkState("link")} label="Link (Optional)"/>
+					  </Modal.Body>
+				    <Modal.Footer>
+				    <Button bsStyle="primary" onClick={this.addService}>Add</Button>
+				    <Button onClick={this.close}>Cancel</Button>
+				    </Modal.Footer>
+			     </Modal>
+				</span>
+			);
+		}
+});
+module.exports = {DiscoveryServiceInfo: DiscoveryServiceInfo, AddDiscoveryServiceButton: AddDiscoveryServiceButton};
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-settings.js b/odf/odf-web/src/main/webapp/scripts/odf-settings.js
new file mode 100755
index 0000000..32802c7
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-settings.js
@@ -0,0 +1,552 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//js imports
+var $ = require("jquery");
+var bootstrap = require("bootstrap");
+var React = require("react");
+var ReactDOM = require("react-dom");
+var LinkedStateMixin = require("react-addons-linked-state-mixin");
+var ReactBootstrap = require("react-bootstrap");
+
+var ODFGlobals = require("./odf-globals.js");
+var AJAXCleanupMixin = require("./odf-mixins.js");
+var configurationStore = require("./odf-utils.js").ConfigurationStore;
+var metadataStore = require("./odf-utils.js").MetadataStore;
+
+var Button = ReactBootstrap.Button;
+var Table = ReactBootstrap.Table;
+var Modal = ReactBootstrap.Modal;
+var Input = ReactBootstrap.Input;
+var Alert = ReactBootstrap.Alert;
+var Panel = ReactBootstrap.Panel;
+var Label = ReactBootstrap.Label;
+var Image = ReactBootstrap.Image;
+var Tabs = ReactBootstrap.Tabs;
+var Tab = ReactBootstrap.Tab;
+
+var ODFConfigPage = React.createClass({
+  mixins: [LinkedStateMixin, AJAXCleanupMixin],
+
+  getInitialState() {
+      return ({odfconfig: { odf: {} }, showDeleteConfirmationDialog: false});
+  },
+
+  componentWillMount() {
+    this.loadODFConfig();
+  },
+
+  componentWillUnmount() {
+	  this.props.alertCallback({type: ""});
+  },
+
+  // all the properties we display under the "odf" path
+  relevantODFPropList: ["instanceId", "odfUrl", "odfUser", "odfPassword", "consumeMessageHubEvents", "atlasMessagehubVcap", "runAnalysisOnImport", "runNewServicesOnRegistration"],
+
+  loadODFConfig() {
+     var req = configurationStore.readConfig(
+    	       function(data) {
+    	    	 // only "fish out" the properties we display and add them as
+    	    	 // toplevel properties to the state.
+
+    	    	 // if we have to make more complex updates this will no longer work
+    	    	 var newStateObj = {};
+    	    	 for (var i=0; i<this.relevantODFPropList.length; i++) {
+    	    		 var prop = this.relevantODFPropList[i];
+    	    		 if (data[prop]) {
+    	    			 newStateObj[prop] = data[prop];
+    	    		 }
+    	    	 }
+    	         this.setState( newStateObj );
+    	       }.bind(this),
+    	       this.props.alertCallback
+    	     );
+	 metadataStore.getProperties(
+			 function(data) {
+			     this.setState({repositoryId: data.STORE_PROPERTY_ID});
+			 }.bind(this)
+	 );
+     this.storeAbort(req.abort);
+  },
+
+  saveODFConfig() {
+	  var newConfigObj = {};
+	  for (var i=0; i<this.relevantODFPropList.length; i++) {
+		 var prop = this.relevantODFPropList[i];
+		 if (this.state[prop] != null) {
+			 newConfigObj[prop] = this.state[prop];
+		 }
+	  }
+	  var req = configurationStore.updateConfig(newConfigObj,
+			  () => {
+					  if(this.isMounted()){
+						  this.props.alertCallback({type: "success", message: "Settings saved successfully."})
+					  }
+				  },
+			  this.props.alertCallback );
+	  this.storeAbort(req.abort);
+  },
+
+  createAtlasSampleData() {
+    this.refs.sampleDataButton.disabled = true;
+    $.ajax({
+       url: ODFGlobals.metadataUrl + "/sampledata",
+       type: 'GET',
+       success: function(data) {
+    	   if(this.isMounted()){
+	    	   this.refs.sampleDataButton.disabled = false;
+	    	   this.props.alertCallback({type: "success", message: "Sample data created successfully."});
+    	   }
+	   }.bind(this),
+       error: function(xhr, status, err) {
+    	   if(this.isMounted()){
+    		   var msg = "Sample data creation failed: " + err.toString();
+    		   this.props.alertCallback({type: "danger", message: msg});
+    		   this.refs.sampleDataButton.disabled = false;
+    	   }
+	   }.bind(this)
+     });
+  },
+
+  deleteAllAtlasData() {
+	 this.refs.deleteAllDataButton.disabled = true;
+	 $.ajax({
+	       url: ODFGlobals.metadataUrl + "/resetalldata",
+	       type: 'POST',
+	       success: function(data) {
+	    	   if(this.isMounted()){
+		    	   this.refs.deleteAllDataButton.disabled = false;
+		    	   this.props.alertCallback({type: "success", message: "All data removed!"});
+		    	   this.closeDeleteConfirmationDialog();
+	    	   }
+		   }.bind(this),
+	       error: function(xhr, status, err) {
+	    	   if(this.isMounted()){
+	    		   var msg = "Data deletion failed: " + err.toString();
+	    		   this.props.alertCallback({type: "danger", message: msg});
+	    		   this.refs.deleteAllDataButton.disabled = false;
+		    	   this.closeDeleteConfirmationDialog();
+	    	   }
+		   }.bind(this)
+	     });
+  },
+
+  openDeleteConfirmationDialog() {
+	  this.setState( { showDeleteConfirmationDialog: true} );
+  },
+
+  closeDeleteConfirmationDialog() {
+	  this.setState( { showDeleteConfirmationDialog: false} );
+  },
+
+
+  testAtlasConnection() {
+    $.ajax({
+       url: ODFGlobals.metadataUrl + "/connectiontest",
+       type: 'GET',
+       success: function(data) {
+    	   if(this.isMounted()){
+	    	   this.props.alertCallback({type: "success", message: "Connection test successful."});
+    	   }
+	   }.bind(this),
+       error: function(xhr, status, err) {
+    	   if(this.isMounted()){
+    		   var msg = "Connection test failed: " + err.toString();
+    		   this.props.alertCallback({type: "danger", message: msg});
+    	   }
+	   }.bind(this)
+     });
+  },
+
+  notificationValue() {
+      if (this.state.runAnalysisOnImport) {
+          return "create";
+      }
+      return "none";
+  },
+
+  notificationsChanged() {
+      var newValue = this.refs.runAnalysisInput.getValue();
+      var val = (newValue != "none");
+      this.setState({runAnalysisOnImport: val});
+  },
+
+  render() {
+    var divStyle = {
+      marginLeft: "20px"
+    };
+    return (
+      <div>
+      	<form>
+	      <fieldset className="form-group  label-floating">
+		        <legend>General Settings</legend>
+		        <br/>
+		        <h4>Instance</h4>
+		          <div style={divStyle}>
+		            <Input type="text" label="ODF Instance ID" valueLink={this.linkState("instanceId")} disabled/>
+		            <Input type="text" label="ODF URL" valueLink={this.linkState("odfUrl")}/>
+		            <Input type="text" label="ODF User ID" valueLink={this.linkState("odfUser")}/>
+		            <Input type="password" label="ODF Password" valueLink={this.linkState("odfPassword")}/>
+		          </div>
+		        <hr/>
+		        <h4>Metadata store</h4>
+		        <div style={divStyle}>
+		          <Input type="text" label="Repository ID" valueLink={this.linkState("repositoryId")} disabled/>
+
+		          <div style={divStyle} className="checkbox">
+		           <label>
+	                <input ref="consumeMessageHubEvents" type="checkbox" checkedLink={this.linkState("consumeMessageHubEvents")} />
+	                <span className="checkbox-material">
+	                	<span className="check"></span>
+	                </span>
+	                &nbsp;&nbsp;Consume events from Messagehub instead of a Kafka instance
+	              </label>
+	            </div>
+		          <Input type="text" label="Atlas Messagehub VCAP" disabled={(this.state && !this.state["consumeMessageHubEvents"])} valueLink={this.linkState("atlasMessagehubVcap")}/>
+		          <Button bsStyle="primary" onClick={this.testAtlasConnection}>Test connection</Button>
+		          <Button bsStyle="success" ref="sampleDataButton" onClick={this.createAtlasSampleData}>Create Atlas sample data</Button>
+		          <Button bsStyle="danger" ref="deleteAllDataButton" onClick={this.openDeleteConfirmationDialog}>Delete all Atlas data</Button>
+		          <Modal show={this.state.showDeleteConfirmationDialog} onHide={this.closeDeleteConfirmationDialog}>
+		            <Modal.Header closeButton>
+		              <Modal.Title>Confirm deletion</Modal.Title>
+		            </Modal.Header>
+		            <Modal.Body>
+		              <h4>Are you sure you want to delete all data from the metadata repository?</h4>
+		            </Modal.Body>
+		              <Modal.Footer>
+		              <Button onClick={this.deleteAllAtlasData}>Delete all Data</Button>
+		              <Button onClick={this.closeDeleteConfirmationDialog}>Close</Button>
+		            </Modal.Footer>
+		          </Modal>
+		        </div>
+		        <hr/>
+		        <h4>Notifications</h4>
+		        <div style={divStyle}>
+		          <Input type="select" label="Run analysis automatically" ref="runAnalysisInput" onChange={this.notificationsChanged} value={this.notificationValue()}>
+		             <option value="none">Never</option>
+                     <option value="create">On create</option>
+		          </Input>
+		        </div>
+		        <div style={divStyle} className="checkbox">
+		           <label>
+	                <input ref="runServicesOnRegInput" type="checkbox" checkedLink={this.linkState("runNewServicesOnRegistration")} />
+	                <span className="checkbox-material">
+	                	<span className="check"></span>
+	                </span>
+	                &nbsp;&nbsp;Automatically run all newly registered services in order to keep asset metadata up-to-date
+	              </label>
+	            </div>
+		        <hr/>
+		        <Button className="btn-raised" bsStyle="primary" onClick={this.saveODFConfig}>Save Settings</Button>
+		        <Button onClick={this.loadODFConfig}>Reload</Button>
+	        </fieldset>
+        </form>
+      </div>);
+  }
+
+});
+
+
+var SparkConfigPage = React.createClass({
+  mixins: [LinkedStateMixin, AJAXCleanupMixin],
+
+  getInitialState() {
+      return ({"clusterMasterUrl": ""});
+  },
+
+  componentWillMount() {
+    this.loadODFConfig();
+  },
+
+  componentWillUnmount() {
+	  this.props.alertCallback({type: ""});
+  },
+
+   loadODFConfig() {
+     var req = configurationStore.readConfig(
+    	       function(data) {
+    	    	 var sparkConfig = {};
+    	    	 if(data.sparkConfig != null){
+    	    		 sparkConfig = data.sparkConfig;
+    	    	 }
+    	    	 this.setState(sparkConfig);
+    	       }.bind(this),
+    	       this.props.alertCallback
+    	     );
+     this.storeAbort(req.abort);
+  },
+
+  saveODFConfig() {
+	  var sparkConfig = {clusterMasterUrl: this.state.clusterMasterUrl};
+	  var req = configurationStore.updateConfig({"sparkConfig" : sparkConfig},
+			  () => {
+					  if(this.isMounted()){
+						  this.props.alertCallback({type: "success", message: "Spark config saved successfully."})
+					  }
+				  },
+			  this.props.alertCallback );
+	  this.storeAbort(req.abort);
+  },
+
+  render() {
+    var divStyle = {
+      marginLeft: "20px"
+    };
+
+	var sparkSettings =  <div>
+						<h4>Local spark cluster</h4>
+		    			<div style={divStyle}>
+						  <Input type="text" label="Cluster master URL" valueLink={this.linkState("clusterMasterUrl")}/>
+						</div>
+					 </div>;
+
+
+    return (
+      <div>
+      	<form>
+	      <fieldset className="form-group  label-floating">
+		        <legend>Spark configuration</legend>
+		        	{sparkSettings}
+		        <Button className="btn-raised" bsStyle="primary" onClick={this.saveODFConfig}>Save Settings</Button>
+		        <Button onClick={this.loadODFConfig}>Reload</Button>
+	        </fieldset>
+        </form>
+      </div>);
+  }
+
+});
+
+
+var PropertyAddButton = React.createClass({
+  getInitialState() {
+     return ({showModal: false});
+  },
+
+  close() {
+    this.setState({ showModal: false });
+  },
+
+  save() {
+    var newPropObj = {};
+    newPropObj[this.state.name] = this.state.value;
+    var updateConfig = { userDefined: newPropObj };
+    configurationStore.updateConfig(updateConfig,
+    		() => { this.props.successCallback();
+	                this.props.alertCallback({type: "success", message: "User-defined property added successfully."})
+    		},
+    		this.props.alertCallback
+    );
+  },
+
+  saveAndClose() {
+    this.save();
+    this.close();
+  },
+
+  open() {
+    this.setState({ showModal: true });
+  },
+
+  handleTextChange() {
+    this.setState({
+          name: this.refs.inputName.getValue(),
+          value: this.refs.inputValue.getValue()
+        });
+  },
+
+  handleClick() {
+      this.open();
+  },
+
+  render: function() {
+    return (<span>
+    <Button bsStyle="primary" className="btn-raised" onClick={this.handleClick}>Add</Button>
+      <Modal show={this.state.showModal} onHide={this.close}>
+          <Modal.Header closeButton>
+             <Modal.Title>Add Property</Modal.Title>
+          </Modal.Header>
+          <Modal.Body>
+             <Input type="text" ref="inputName" label="Name" onChange={this.handleTextChange}></Input>
+             <Input type="text" ref="inputValue" label="Value" onChange={this.handleTextChange}></Input>
+         </Modal.Body>
+         <Modal.Footer>
+         <Button bsStyle="primary" onClick={this.saveAndClose}>Save</Button>
+         <Button onClick={this.close}>Cancel</Button>
+         </Modal.Footer>
+			</Modal>
+      </span>);
+  }
+
+});
+
+
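+// Note: removal below works by writing a null value for the property into the
+// "userDefined" section of the configuration; UserDefinedConfigPage skips
+// falsy values when rendering, so the property disappears from the table.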
+var PropertyRemoveButton = React.createClass({
+
+   handleClick() {
+      var newPropObj = {};
+      newPropObj[this.props.name] = null;
+      var updateConfig = { userDefined: newPropObj };
+      configurationStore.updateConfig(updateConfig,
+    		  () => { this.props.successCallback();
+		              this.props.alertCallback({type: "success", message: "User-defined property removed successfully."});
+    		  },
+    		  this.props.alertCallback
+      );
+   },
+
+   render() {
+     return (
+    		 <Button onClick={this.handleClick}>Remove</Button>
+    );
+   }
+
+});
+var PropertyEditButton = React.createClass({
+
+   getInitialState() {
+      return ({showModal: false});
+   },
+
+   close() {
+     this.setState({ showModal: false });
+   },
+
+   save() {
+     var newPropObj = {};
+     newPropObj[this.props.name] = this.state.value;
+     var updateConfig = { userDefined: newPropObj };
+     configurationStore.updateConfig(updateConfig,
+    		 () => { this.props.successCallback();
+    		         this.props.alertCallback({type: "success", message: "User-defined property saved successfully."})
+    		 }, this.props.alertCallback
+     );
+   },
+
+   saveAndClose() {
+     this.save();
+     this.close();
+   },
+
+   open() {
+     this.setState({ showModal: true });
+   },
+
+   handleTextChange() {
+     this.setState({
+           value: this.refs.input.getValue()
+         });
+   },
+
+   handleClick() {
+       this.open();
+   },
+
+   render: function() {
+     return (
+       <span>
+        <Button bsStyle="primary" onClick={this.handleClick}>Edit</Button>
+        <Modal show={this.state.showModal} onHide={this.close}>
+            <Modal.Header closeButton>
+               <Modal.Title>Edit Property</Modal.Title>
+            </Modal.Header>
+            <Modal.Body>
+               <h4>Enter new value for property '{this.props.name}'</h4>
+               <Input type="text" ref="input" onChange={this.handleTextChange} defaultValue={this.props.value}></Input>
+           </Modal.Body>
+           <Modal.Footer>
+           <Button bsStyle="primary" onClick={this.saveAndClose}>Save</Button>
+           <Button onClick={this.close}>Cancel</Button>
+           </Modal.Footer>
+        </Modal>
+       </span>);
+   }
+});
+
+var UserDefinedConfigPage = React.createClass({
+   mixins : [AJAXCleanupMixin],
+
+   getInitialState: function() {
+      return {odfconfig: { userDefined: {}}};
+   },
+
+   loadUserDefConfig: function() {
+     var req = configurationStore.readConfig(
+       function(data) {
+         this.setState( {odfconfig: data} );
+       }.bind(this),
+       this.props.alertCallback
+     );
+
+     this.storeAbort(req.abort);
+   },
+
+   componentDidMount: function() {
+     this.loadUserDefConfig();
+   },
+
+   componentWillUnmount : function() {
+	  this.props.alertCallback({type: ""});
+   },
+
+   render: function() {
+     var tableContents = $.map(
+           this.state.odfconfig.userDefined,
+           function(value, name) {
+        	   if (value) {
+        		 var tdBtnFixStyle = { paddingTop : "26px"};
+
+                 return <tr key={name}>
+                          <td style={tdBtnFixStyle}>{name}</td>
+                          <td style={tdBtnFixStyle}>{value}</td>
+                          <td><PropertyEditButton name={name} value={value} successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
+                              <PropertyRemoveButton name={name} successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
+                          </td>
+                        </tr>;
+        	   }
+        	   // empty element
+        	   return null;
+           }.bind(this));
+     return (
+       <div>
+	       <form>
+		      <fieldset className="form-group  label-floating">
+		      <legend>
+			       User-defined properties
+		       </legend>
+		       <Table responsive>
+		          <thead>
+		            <tr>
+		              <th>Name</th>
+		              <th>Value</th>
+		              <th></th>
+		            </tr>
+		          </thead>
+		          <tbody>
+		             {tableContents}
+		          </tbody>
+		       </Table>
+		       <PropertyAddButton successCallback={this.loadUserDefConfig} alertCallback={this.props.alertCallback}/>
+	       </fieldset>
+	       </form>
+       </div>);
+   }
+
+});
+
+
+
+module.exports = {ODFConfigPage : ODFConfigPage, SparkConfigPage : SparkConfigPage, UserDefinedConfigPage: UserDefinedConfigPage} ;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-statistics.js b/odf/odf-web/src/main/webapp/scripts/odf-statistics.js
new file mode 100755
index 0000000..ea0e151
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-statistics.js
@@ -0,0 +1,413 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var d3 = require("d3");
+var ReactBootstrap = require("react-bootstrap");
+var ReactD3 = require("react-d3-components");
+
+var AJAXCleanupMixin = require("./odf-mixins.js");
+
+var Image = ReactBootstrap.Image;
+var Panel = ReactBootstrap.Panel;
+var BarChart = ReactD3.BarChart;
+var PieChart = ReactD3.PieChart;
+var LineChart = ReactD3.LineChart;
+
+var ODFGlobals = require("./odf-globals.js");
+
+const GRAPH_REFRESH_DELAY_MS = 4000;
+
+var ODFStats = {
+	CurrentThreadGraph : React.createClass({
+
+		tooltipLine : function(label, data) {
+	        return "Running threads " + data.y;
+	    },
+
+	    xScale : function() {
+	    	return "";
+	    },
+
+		render : function(){
+			var lineChart = null;
+
+			if(this.props.threadValues){
+
+				var data = [
+				        {
+				        	label: 'Thread count',
+				            values: [ ]
+				         }
+				    ];
+
+				for(var no = 0; no < this.props.threadValues.length; no++){
+					data[0].values.push({x : no + 1, y : this.props.threadValues[no]});
+				}
+
+				lineChart = <LineChart
+				                data={data}
+								width={400}
+				                height={400}
+				                margin={{top: 10, bottom: 50, left: 50, right: 10}}
+				                tooltipContained
+			                    tooltipHtml={this.tooltipLine}
+				                shapeColor={"red"}
+				 				xAxis={{tickValues: []}}
+								/>;
+			}
+
+			return (
+					<div>
+						<h4>Currently running threads in ODF</h4>
+						{lineChart}
+					</div>);
+		}
+
+
+	}),
+
+	SystemDiagnostics : React.createClass({
+		mixins : [AJAXCleanupMixin],
+
+		getODFStatus : function(){
+			var currentState = this.state;
+
+			const url = ODFGlobals.engineUrl + "/status";
+	        var req = $.ajax({
+	            url: url,
+	            contentType: "application/json",
+	            dataType: 'json',
+	            type: 'GET',
+	            success: function(data) {
+	            	if(currentState == null){
+	            		currentState = { threadValues : [0]};
+	            	}
+	            	currentState.threadValues.push(data.threadManagerStatus.length);
+	                if(currentState.threadValues.length > 5){
+	                	currentState.threadValues.splice(0, 1);
+	                }
+
+	            	this.setState(currentState);
+	            }.bind(this),
+	            error: function(xhr, status, err) {
+	              var msg = "ODF status request failed, " + err.toString();
+	              this.props.alertCallback({type: "danger", message: msg});
+	            }.bind(this)
+	        });
+
+	        this.storeAbort(req.abort);
+		},
+
+		componentWillMount : function() {
+			this.getODFStatus();
+		},
+
+		componentWillUnmount () {
+		    this.refreshInterval && clearInterval(this.refreshInterval);
+		    this.refreshInterval = false;
+		},
+
+		componentWillReceiveProps: function(nextProps){
+			if(!nextProps.visible){
+				 this.refreshInterval && clearInterval(this.refreshInterval);
+				 this.refreshInterval = false;
+			}else if(!this.refreshInterval){
+				this.refreshInterval = window.setInterval(this.getODFStatus, GRAPH_REFRESH_DELAY_MS);
+			}
+		},
+
+		tooltipLine : function(label, data) {
+	        return "Running threads " + data.y;
+	    },
+
+	    xScale : function() {
+	    	return "";
+	    },
+
+		render : function(){
+			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
+
+			var threadGraph = null;
+			if(this.state){
+				progressIndicator = null;
+				threadGraph = <ODFStats.CurrentThreadGraph threadValues={this.state.threadValues} />;
+			}
+
+			return (
+					<div>
+						{progressIndicator}
+						{threadGraph}
+					</div> );
+		}
+	}),
+
+	TotalAnalysisGraph : React.createClass({
+		mixins : [AJAXCleanupMixin],
+
+		getAnalysisStats : function() {
+			const url = ODFGlobals.analysisUrl + "/stats";
+	        var req = $.ajax({
+	            url: url,
+	            contentType: "application/json",
+	            dataType: 'json',
+	            type: 'GET',
+	            success: function(data) {
+	               this.setState(data);
+	            }.bind(this),
+	            error: function(xhr, status, err) {
+	              var msg = "Analysis stats request failed, " + err.toString();
+	              this.props.alertCallback({type: "danger", message: msg});
+	            }.bind(this)
+	        });
+
+	        this.storeAbort(req.abort);
+		},
+
+		componentWillMount : function() {
+			this.getAnalysisStats();
+		},
+
+		componentWillUnmount () {
+		    this.refreshInterval && clearInterval(this.refreshInterval);
+		    this.refreshInterval = false;
+		},
+
+		componentWillReceiveProps: function(nextProps){
+			if(!nextProps.visible){
+				 this.refreshInterval && clearInterval(this.refreshInterval);
+				 this.refreshInterval = false;
+			}else if(!this.refreshInterval){
+				this.refreshInterval = window.setInterval(this.getAnalysisStats, GRAPH_REFRESH_DELAY_MS);
+			}
+		},
+
+		tooltipPie : function(x, y) {
+		    return y.toString() + " absolute";
+		},
+
+		render : function() {
+			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
+			var pieChart = null;
+
+			if(this.state){
+				progressIndicator = null;
+				var succ = (this.state.success ? this.state.success : (this.state.failure ? 0 : 100));
+				var fail = (this.state.failure ? this.state.failure : 0);
+				var onePercent = (succ + fail) / 100;
+
+				var succVal = (onePercent == 0 ? 100 : (succ / onePercent)).toFixed(2);
+				var failVal = (onePercent == 0 ? 0 : (fail / onePercent)).toFixed(2);
+
+				var pieData = {label: "Total success and failure",
+								values : [{x: "Finished requests (" + succVal + " %)", y: succ},
+								          {x: "Failed requests (" + failVal + " %)", y: fail}
+									]
+								};
+
+				var colorScale = d3.scale.ordinal().range(["lightgreen", "#F44336"]);
+
+				var pieStyle = {opacity : "1 !important"};
+				pieChart = (<PieChart
+	                    data={pieData}
+	                    width={800}
+	                    height={400}
+	                    margin={{top: 10, bottom: 10, left: 200, right: 200}}
+	                    tooltipHtml={this.tooltipPie}
+	                    tooltipOffset={{top: 175, left: 200}}
+						tooltipMode={"fixed"}
+						style={pieStyle}
+   				      	colorScale={colorScale}
+	                    />);
+			}
+			return (
+					<div>
+						<h4>Total analysis requests and failures</h4>
+			           	{progressIndicator}
+						{pieChart}
+						<hr />
+					</div>);
+		}
+	}),
+
+	PerServiceStatusGraph : React.createClass({
+		mixins : [AJAXCleanupMixin],
+
+		getServiceStatus : function() {
+			const url = ODFGlobals.servicesUrl + "/status";
+	        var req = $.ajax({
+	            url: url,
+	            contentType: "application/json",
+	            dataType: 'json',
+	            type: 'GET',
+	            success: function(data) {
+	               this.setState(data);
+	            }.bind(this),
+	            error: function(xhr, status, err) {
+	              var msg = "Service status request failed, " + err.toString();
+	              this.props.alertCallback({type: "danger", message: msg});
+	            }.bind(this)
+	        });
+
+	        this.storeAbort(req.abort);
+		},
+
+		componentWillMount : function() {
+			this.getServiceStatus();
+		},
+
+		componentWillUnmount () {
+		    this.refreshInterval && clearInterval(this.refreshInterval);
+		    this.refreshInterval = false;
+		},
+
+		componentWillReceiveProps: function(nextProps){
+			if(!nextProps.visible){
+				 this.refreshInterval && clearInterval(this.refreshInterval);
+				 this.refreshInterval = false;
+			}else if(!this.refreshInterval){
+				this.refreshInterval = window.setInterval(this.getServiceStatus, GRAPH_REFRESH_DELAY_MS);
+			}
+		},
+
+		tooltip : function(x, y0, y, total) {
+			var barData = this.getBarData();
+			var text = y;
+			var name = null;
+			if(barData && barData.length > 0){
+				$.map(barData, function(res){
+					$.map(res.values, function(val){
+						if(val.x == x && val.y == y){
+							name = val.fullName;
+						}
+					});
+				});
+			}
+
+			var tooltipStyle = {top : "-20px", position: "absolute", left: "-100px", "minWidth" : "350px"};
+
+			if(name == null){
+				tooltipStyle.left = 0;
+			}
+
+		    return (
+		    		<div style={tooltipStyle}>
+		    			<span>{name}, {text}</span>
+		    		</div>
+		    		);
+		},
+
+		getBarData : function(){
+			if(this.state && !$.isEmptyObject(this.state)){
+				var currentState = this.state;
+				var statusMap = {};
+				$.map(currentState, function(res){
+					var states = res.statusCountMap;
+					$.each(states, function(state, count){
+						var currentArr = statusMap[state];
+						if(currentArr === undefined){
+							currentArr = [];
+						}
+
+						var lbl = (res.name ? res.name : res.id);
+						//only shorten names if more than 1 bar is displayed
+						if(currentState && Object.keys(currentState) && Object.keys(currentState).length > 1 && lbl && lbl.length > 17){
+							lbl = lbl.substring(0, 17) + "..";
+						}
+
+						currentArr.push({"x" : lbl, "y": count, "fullName" : res.name});
+						statusMap[state] = currentArr;
+					});
+				});
+
+				var barData = [];
+
+				$.each(statusMap, function(key, val){
+					barData.push({"label" : key, "values" : val});
+				});
+
+				barData = barData.reverse();
+				return barData;
+			}else{
+				return [ { "label" : "No data available", "values" : [{"x" : "No data available", "y" : 0}]}];
+			}
+		},
+
+		getLegend : function(barData, colors){
+			var lbls = [];
+			for(var no = 0; no < barData.length; no++){
+				lbls.push(<div key={no} ><span style={{color: colors[no]}}>{barData[no].label}</span><br/></div>);
+			}
+
+			return (
+					<div style={{float:"right"}}>
+						{lbls}
+					</div>
+				);
+		},
+
+		render : function() {
+			var progressIndicator = <Image src="img/lg_proc.gif" rounded />;
+			var barChart = null;
+
+			if(this.state){
+				progressIndicator = null;
+				var barData = this.getBarData();
+
+				var barStyle = {marginTop: "50px"};
+
+				//cancelled, initialized, error, running, in queue, finished
+				var colors = ["black", "#F44336", "lightgreen", "blue", "lightblue", "grey"];
+				var colorScale = d3.scale.ordinal().range(colors);
+
+				if(barData != null){
+					var barWidth = (Object.keys(this.state).length >= 2 ? Object.keys(this.state).length * 200 : 400);
+
+					barChart = (
+								<div style={barStyle}>
+									{this.getLegend(barData, colors)}
+									<BarChart
+									  data={barData}
+									  width={barWidth}
+							          height={400}
+								      colorScale={colorScale}
+							          margin={{top: 30, bottom: 50, left: 50, right: 10}}
+									  tooltipHtml={this.tooltip}
+								      tooltipMode={"element"}
+									/>
+								</div>
+							);
+				}
+			}
+
+			return (
+					<div>
+						<h4>Analysis runs per service</h4>
+			           	{progressIndicator}
+						{barChart}
+					</div>);
+			}
+		})
+}
+
+module.exports = ODFStats;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js b/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js
new file mode 100755
index 0000000..a6bc381
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-ui-spec.js
@@ -0,0 +1,316 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ReactBootstrap = require("react-bootstrap");
+
+var Label = ReactBootstrap.Label;
+var ListGroup = ReactBootstrap.ListGroup;
+var ListGroupItem = ReactBootstrap.ListGroupItem;
+var Glyphicon = ReactBootstrap.Glyphicon;
+
+/*
+ * for every data type a UI specification can be created.
+ * A UI specification is an array of objects.
+ *
+ * Normally, the key of a property will be used to find a matching ui spec.
+ * This can be overwritten by defining the uiSpec attribute on a property object.
+ *
+ * Each object requires a key to identify the property and a label that will be displayed
+ * In order to manipulate the value or how it is displayed, a property object can pass a function to the func attribute.
+ * This function will be called with the property value and the object as parameters.
+ *
+ * Properties with an array as their value will automatically be displayed in a grid.
+ * A UI specification that is used for a grid can have a property object with the attribute sort:true, causing the table to be sorted alphabetically on this property.
+*/
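+
+// A minimal illustrative entry following the rules above (the "shortText" type
+// is hypothetical and not part of the spec below):
+//
+//   shortText : {
+//       attributes: [
+//           {key: "name", label: "Name", sort: true},
+//           {key: "length", label: "Length",
+//              func: function(val, obj){ return val + " characters"; }}
+//       ],
+//       icon: <Glyphicon glyph="font" />
+//   }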
+
+var UISpec = {
+
+		DefaultDocument : {
+			attributes: [
+			            {key: "name", label: "Name"},
+			            {key: "description", label: "Description"},
+			            {key: "type", label: "Type"}],
+		    icon: <Glyphicon glyph="question-sign" />
+		},
+
+		DefaultDocuments : {
+			attributes: [
+			            {key: "name", label: "Name"},
+			            {key: "description", label: "Description"},
+			            {key: "type", label: "Type"}],
+		    icon: <Glyphicon glyph="question-sign" />
+		},
+
+		Document : {
+			attributes: [{key: "reference.id", label: "ID"},
+		            {key: "name", label: "Name"},
+		            {key: "type", label: "Type"}],
+		    icon: <Glyphicon glyph="file" />
+		},
+
+		Documents : {
+			attributes: [{key: "name", label: "Name"},
+		            {key: "description", label: "Description"},
+		            {key: "columns", label: "Columns"} ,
+		            {key: "annotations", label: "Annotations",
+		            	 func: function(val){
+		            		 if(!val){
+		            			 return 0;
+		            		 }
+		            		 return val.length;
+		            	 }
+		             }],
+		   icon: <Glyphicon glyph="file" />
+		},
+
+		DataFile : {
+			attributes: [{key: "name", label: "Name"},
+			          {key: "description", label: "Description"},
+			          {key: "columns", label: "Columns"} ,
+			          {key: "annotations", label: "Annotations"}],
+			icon: <Glyphicon glyph="list-alt" />
+		},
+
+		DataFiles: {
+			attributes: [{key: "name", label: "Name"},
+		             {key: "columns", label: "Columns"} ,
+		             {key: "annotations", label: "Annotations",
+		            	 func: function(val){
+		            		 if(!val){
+		            			 return 0;
+		            		 }
+		            		 return val.length;
+		            	 }
+		             }],
+		    icon: <Glyphicon glyph="list-alt" />
+		},
+
+		Table : {
+			attributes: [{key: "schema", label: "Schema"},
+		         {key: "name", label: "Name"},
+		         {key:"description", label: "Description"},
+		         {key:"columns", label: "Columns"} ,
+		         {key: "annotations", label: "Annotations"}],
+     	    icon: <Glyphicon glyph="th" />
+		},
+
+		Tables : {
+			attributes: [ // {key: "schema", label: "Schema"},
+	          {key: "name", label: "Name"},
+		          {key: "columns", label: "Columns"} ,
+		          {key: "annotations", label: "Annotations",
+		        	  func: function(val){
+		        		  if(!val){
+		        			  return 0;
+		        			  }
+		        		  return val.length;
+		        		}
+		          }],
+     	    icon: <Glyphicon glyph="th" />
+		},
+
+		column : {
+			attributes: [{key: "name", label: "Name"},
+		          {key: "dataType", label: "Datatype"},
+		          {key: "annotations", label: "Annotations"}],
+
+		    icon: <Glyphicon glyph="th-list" />
+		},
+
+		columns : {
+			attributes: [{key: "name", label: "Name", sort: true},
+		           {key: "dataType", label: "Datatype"},
+		           {key: "annotations", label: "Annotations",
+			        	  func: function(val){
+			        		  if(!val){
+			        			  return 0;
+			        		  }
+			        		  return val.length;
+			        		}
+			          }],
+		    icon: <Glyphicon glyph="th-list" />
+		},
+
+		// Possible fields of an annotation: InferredDataClass, AnalysisRun, AnnotationType, JavaClass, Annotations, AnnotatedObject, Reference, JsonProperties
+		annotation : {
+			attributes: [{key: "annotationType", label: "Annotation type"},
+
+		              // see Infosphere DQ service: ColumnAnalysisTableAnnotation
+                      {key: "dataClassDistribution", label: "Data Class Distribution",
+		                  func: function(val) {
+		                      if (val) {
+		                          return <span>{JSON.stringify(val)}</span>;
+		                      }
+		                  }
+                      },
+                      // see Infosphere DQ service: ColumnAnalysisColumnAnnotation
+		              {key: "inferredDataClass", label: "Data Class",
+		                func: function(val) {
+		                     if (val) {
+                                if(val.className){
+                                    var confidence = "";
+                                    if (val.confidenceThreshold) {
+                                        confidence = " ("+val.confidenceThreshold+")";
+                                    }
+                                    return <span>{val.className}{confidence}</span>;
+                                }
+                                return <span>{JSON.stringify(val)}</span>;
+		                     }
+                          }
+		              },
+                      {key: "qualityScore", label: "Data Quality Score"},
+
+		              // see alchemy taxonomy service: TaxonomyDiscoveryService.TaxonomyAnnotation
+		              {key: "label", label: "Category"},
+                      {key: "score", label: "Score"},
+
+		              {key: "analysisRun", label:"Analysis"},
+		              {key: "jsonProperties", label: "Properties"}
+		              ],
+  		    icon: <Glyphicon glyph="tag" />
+		},
+
+		annotations : {
+			attributes: [{key: "annotationType", label: "Annotation type"},
+			             {key: "analysisRun", label:"Analysis"}],
+		  	icon: <Glyphicon glyph="tag" />
+		},
+
+	    request : {
+	    	attributes:[
+	               {key: "request.id", label: "Request ID"},
+	               {key: "state", label: "Status",
+	            	   func: function(val){
+	            		   var btnCss = {};
+	                       var statusLabel = <Label bsStyle="warning">Unknown</Label>;
+	                       if (val == "ACTIVE") {
+	                          statusLabel = <Label bsStyle="info">Active</Label>;
+	                       } else if (val== "QUEUED") {
+	                          statusLabel = <Label bsStyle="info">Queued</Label>;
+	                       } else if (val== "CANCELLED") {
+	                          statusLabel = <Label bsStyle="warning">Cancelled</Label>;
+	                       } else if (val== "FINISHED") {
+	                          statusLabel = <Label bsStyle="success">Finished</Label>;
+	                       } else if (val== "ERROR") {
+	                          statusLabel = <Label bsStyle="danger">Error</Label>;
+	                       }
+	                       return statusLabel;
+	            	   }},
+            	   {key: "request.dataSets", label: "Data sets", uiSpec: "DefaultDocuments"},
+            	   {key: "totalTimeOnQueues", label: "Total time on queues", func: function(val){
+            		   if(val){
+            			   	var x = val / 1000;
+            			    var seconds = Math.floor(x % 60);
+            			    x /= 60;
+            			    var minutes = Math.floor(x % 60);
+            			    x /= 60;
+            			    var hours = Math.floor(x % 24);
+
+            			    return hours + "h " + minutes + "m " + seconds + "s";
+            		   }
+            		   return "";
+            	   }},
+            	   {key: "totalTimeProcessing", label: "Total time processing", func: function(val){
+            		   if(val){
+            			   	var x = val / 1000;
+            			    var seconds = Math.floor(x % 60);
+            			    x /= 60;
+            			    var minutes = Math.floor(x % 60);
+            			    x /= 60;
+            			    var hours = Math.floor(x % 24);
+
+            			    return hours + "h " + minutes + "m " + seconds + "s";
+            		   }
+            		   return "";
+            	   }},
+            	   {key: "totalTimeStoringAnnotations", label: "Total time storing results", func: function(val){
+            		   if(val){
+            			   	var x = val / 1000;
+            			    var seconds = Math.floor(x % 60);
+            			    x /= 60;
+            			    var minutes = Math.floor(x % 60);
+            			    x /= 60;
+            			    var hours = Math.floor(x % 24);
+
+            			    return hours + "h " + minutes + "m " + seconds + "s";
+            		   }
+            		   return "";
+            	   }},
+	               {key: "serviceRequests", label: "Service Sequence", func: function(val, obj){
+	            	   var serviceNames = [];
+	            	   var services = [];
+	            	   for (var i=0; i<val.length; i++) {
+	                       var dsreq = val[i];
+	                       var dsName = dsreq.discoveryServiceName;
+	                       if(serviceNames.indexOf(dsName) == -1){
+	                    	   serviceNames.push(dsName);
+	                    	   services.push(<span key={dsName}>{dsName}<br/></span>);
+	                       }
+	                   }
+
+	                   return <em>{services}</em>;
+	               	}
+	               },
+	               {key: "details", label: "Status Details"}
+	               ],
+	   		  	icon: <Glyphicon glyph="play-circle" />
+	    },
+
+	    requests : {
+	    	attributes: [
+		               {key: "request.id", label: "Request ID"},
+		               {key: "status", label: "Status",
+		            	   func: function(val){
+		                       var statusLabel = <Label bsStyle="warning">Unknown</Label>;
+		                       if (val == "INITIALIZED") {
+		                          statusLabel = <Label bsStyle="info">Initialized</Label>;
+		                       } else if (val== "IN_DISCOVERY_SERVICE_QUEUE") {
+		                          statusLabel = <Label bsStyle="info">Queued</Label>;
+		                       } else if (val== "DISCOVERY_SERVICE_RUNNING") {
+		                           statusLabel = <Label bsStyle="info">Running</Label>;
+		                       } else if (val== "CANCELLED") {
+		                          statusLabel = <Label bsStyle="warning">Cancelled</Label>;
+		                       } else if (val== "FINISHED") {
+		                          statusLabel = <Label bsStyle="success">Finished</Label>;
+		                       } else if (val== "ERROR") {
+		                          statusLabel = <Label bsStyle="danger">Error</Label>;
+		                       }
+		                       return statusLabel;
+		            	   }},
+		               {key: "lastModified", label: "Last modified", func: function(val){
+		            	   return new Date(val).toLocaleString();
+		               }},
+		               {key: "discoveryServiceRequests", label: "Service sequence", func: function(val, obj){
+		            	   var serviceNames = [];
+		            	   var services = [];
+		            	   for (var i=0; i<val.length; i++) {
+		                       var dsreq = val[i];
+		                       var dsName = dsreq.discoveryServiceName;
+		                       if(serviceNames.indexOf(dsName) == -1){
+		                    	   serviceNames.push(dsName);
+		                    	   services.push(<span key={dsName}>{dsName}<br/></span>);
+		                       }
+		                   }
+
+		                   return <ListGroup>{services}</ListGroup>;
+		               }},
+		               {key: "statusDetails", label: "Status Details"}
+		               ],
+	   	icon: <Glyphicon glyph="play-circle" />
+	    }
+};
+
+module.exports = UISpec;
diff --git a/odf/odf-web/src/main/webapp/scripts/odf-utils.js b/odf/odf-web/src/main/webapp/scripts/odf-utils.js
new file mode 100755
index 0000000..5684556
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/scripts/odf-utils.js
@@ -0,0 +1,338 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var $ = require("jquery");
+var React = require("react");
+var ODFGlobals = require("./odf-globals.js");
+
+var Utils= {
+
+	arraysEqual : function(arr1, arr2){
+		var a = arr1;
+		var b = arr2;
+		if(a == null || b == null){
+			return a == b;
+		}
+
+		if(a.length != b.length){
+			return false;
+		}
+
+		var equal = true;
+		$.each(a, function(key, val){
+			if(a[key] && !b[key]){
+				equal = false;
+				return false; // returning false stops $.each early
+			}
+			if(val && typeof val == "object"){
+				if(!this.arraysEqual(val, b[key])){
+					equal = false;
+					return false;
+				}
+			}else if(val != b[key]){
+				equal = false;
+				return false;
+			}
+		}.bind(this));
+		return equal;
+	},
+
+	AnnotationStoreHelper : {
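+		// Fetches all annotations produced by a given analysis request
+		// via GET <annotationsUrl>?analysisRequestId=<id>.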
+		loadAnnotationsForRequest : function(analysisRequestId, successCallback, errorCallback) {
+		    var url = ODFGlobals.annotationsUrl + "?analysisRequestId=" + analysisRequestId;
+            return $.ajax({
+               url: url,
+               type: 'GET',
+               success: function(data) {
+                   if(successCallback){
+                       successCallback(data);
+                   }
+               },
+               error: function(xhr, status, err) {
+                   if(errorCallback){
+                       errorCallback(err);
+                   }
+               }
+            });
+		}
+	},
+
+	AtlasHelper : {
+
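+		// Convenience wrapper: issues one loadAtlasAsset request per reference
+		// and returns the resulting array of jqXHR promises.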
+		loadAtlasAssets : function(assets, successCallback, errorCallback){
+			var reqs = [];
+			$.each(assets, function(key, val){
+				reqs.push(this.loadAtlasAsset(val, successCallback, errorCallback));
+			}.bind(this));
+			return reqs;
+		},
+
+		loadMostRecentAnnotations : function(asset, successCallback, errorCallback) {
+		    var url = ODFGlobals.annotationsUrl + "/newestAnnotations/" + encodeURIComponent(JSON.stringify({repositoryId: asset.repositoryId, id: asset.id}));
+            return $.ajax({
+               url: url,
+               type: 'GET',
+               success: function(data) {
+                   if(successCallback){
+                       successCallback(data);
+                   }
+               },
+               error: function(xhr, status, err) {
+                   if(errorCallback){
+                       errorCallback(err);
+                   }
+               }
+            });
+		},
+
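+		// Loads the columns of a relational data set, stores their references on
+		// dataSet.columns, resolves the annotations of each column, and finally
+		// passes the annotated column objects to successCallback.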
+		loadRelationalDataSet: function(dataSet, successCallback, errorCallback) {
+			var url = ODFGlobals.metadataUrl + "/asset/" + encodeURIComponent(JSON.stringify({repositoryId: dataSet.reference.repositoryId, id: dataSet.reference.id})) + "/columns";
+			return $.ajax({
+				url: url,
+				type: 'GET',
+				error: function(xhr, status, err) {
+					if(errorCallback){
+						errorCallback(err);
+					}
+				}
+			}).then( function(cols){
+				if(!cols){
+					successCallback([]);
+					return [];
+				}
+				var requests = [];
+				var colRefs = [];
+				$.each(cols, function(key, val){
+					var req = Utils.AtlasHelper.getColAnnotations(val);
+					requests.push(req);
+					colRefs.push(val.reference);
+				}.bind(this));
+				dataSet.columns = colRefs;
+				$.when.apply(undefined, requests).done(function(){
+					var data = [];
+					if(requests.length > 1){
+						$.each(arguments, function(key, val){
+							data.push(val);
+						});
+					}else if(arguments[0]){
+						data.push(arguments[0]);
+					}
+					successCallback(data);
+				});
+				return requests;
+			});
+		},
+
+		getColAnnotations: function(asset, successCallback, errorCallback) {
+			var refid = asset.reference.id;
+			var annotationsUrl = ODFGlobals.annotationsUrl + "?assetReference=" + encodeURIComponent(refid);
+			return $.ajax({
+				url: annotationsUrl,
+				type: 'GET',
+				success: function(annotationData) {
+					asset.annotations = annotationData.annotations;
+					if (successCallback) {
+						successCallback(asset);
+					}
+				},
+				error: function(xhr, status, err) {
+					if(errorCallback){
+						errorCallback(err);
+					}
+				}
+			}).then(function(annotationData) {
+				asset.annotations = annotationData.annotations;
+				return asset;
+			});
+		},
+
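+		// Fetches a single asset by its serialized reference, then resolves its
+		// annotations; the annotated asset is passed to successCallback and is
+		// also returned as the promise value.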
+		loadAtlasAsset : function(asset, successCallback, errorCallback){
+			var url = ODFGlobals.metadataUrl + "/asset/" + encodeURIComponent(JSON.stringify({repositoryId: asset.repositoryId, id: asset.id}));
+			return $.ajax({
+		       url: url,
+		       type: 'GET',
+		       error: function(xhr, status, err) {
+		    	   if(errorCallback){
+		    		   errorCallback(err);
+		    	   }
+		       }
+			}).then( function(data) {
+	    		   var refid = data.reference.id;
+	    		   var annotationsUrl = ODFGlobals.annotationsUrl + "?assetReference=" + encodeURIComponent(refid);
+	    		   return $.ajax({
+	    			  url: annotationsUrl,
+	    			  type: 'GET',
+	    			  success: function(annotationData) {
+	    				  data.annotations = annotationData.annotations;
+	    				  if (successCallback) {
+	    					  successCallback(data);
+	    				  }
+	    			  },
+	    			  error: function(xhr, status, err) {
+	    				  if(errorCallback){
+	    					  errorCallback(err);
+	    				  }
+	    			  }
+	    		   }).then(function(annotationData) {
+	     			   data.annotations = annotationData.annotations;
+	    			   return data;
+	    		   });
+			});
+		},
+
+		searchAtlasMetadata : function(query, successCallback, errorCallback) {
+			var url = ODFGlobals.metadataUrl + "/search?" + $.param({query: query});
+			var req = $.ajax({
+				url: url,
+				dataType: 'json',
+				type: 'GET',
+				success: function(data) {
+					successCallback(data);
+				},
+				error: function(xhr, status, err) {
+					console.error(url, status, err.toString());
+					var msg = "Error while searching metadata: " + err.toString();
+					errorCallback(msg);
+				}
+			});
+			return req;
+		}
+	},
+
+	MetadataStore : {
+
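+		// Reads the metadata store properties via GET <metadataUrl>.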
+		getProperties(successCallback, alertCallback) {
+			if (alertCallback) {
+				alertCallback({type: ""});
+			}
+			return $.ajax({
+				url: ODFGlobals.metadataUrl,
+				dataType: 'json',
+				type: 'GET',
+				success: successCallback,
+				error: function(xhr, status, err) {
+					if (alertCallback) {
+						var msg = "Error while reading metadata store properties: " + err.toString();
+						alertCallback({type: "danger", message: msg});
+					}
+				}
+			});
+		}
+	},
+
+	ConfigurationStore : {
+
+	   // Reads the current ODF settings via GET {apiPrefix}settings.
+	   readConfig(successCallback, alertCallback) {
+		   // clear any previous alert
+		   if (alertCallback) {
+		     alertCallback({type: ""});
+		   }
+
+	     return $.ajax({
+	       url: ODFGlobals.apiPrefix + "settings",
+	       dataType: 'json',
+	       type: 'GET',
+	       success: successCallback,
+	       error: function(xhr, status, err) {
+	         if (alertCallback) {
+	            var msg = "Error while reading ODF settings: " + err.toString();
+	            alertCallback({type: "danger", message: msg});
+	         }
+	       }
+	      });
+	   },
+
+	   updateConfig(config, successCallback, alertCallback) {
+			if (alertCallback) {
+				 alertCallback({type: ""});
+			}
+
+		    return $.ajax({
+			       url: ODFGlobals.apiPrefix + "settings",
+			       contentType: "application/json",
+			       dataType: 'json',
+			       type: 'PUT',
+			       data: JSON.stringify(config),
+			       success: successCallback,
+			       error: function(xhr, status, err) {
+			         if (alertCallback) {
+			            var msg = "Error while updating ODF settings: " + err.toString();
+			            alertCallback({type: "danger", message: msg});
+			         }
+			       }
+		     });
+	   }
+	},
+
+	ServicesStore : {
+
+	   // Retrieves the list of registered ODF services via GET {apiPrefix}services.
+	   getServices(successCallback, alertCallback) {
+		   // clear any previous alert
+		   if (alertCallback) {
+		     alertCallback({type: ""});
+		   }
+
+	     return $.ajax({
+	       url: ODFGlobals.apiPrefix + "services",
+	       dataType: 'json',
+	       type: 'GET',
+	       success: successCallback,
+	       error: function(xhr, status, err) {
+	         if (alertCallback) {
+	            var msg = "Error while getting list of ODF services: " + err.toString();
+	            alertCallback({type: "danger", message: msg});
+	         }
+	       }
+	      });
+	   }
+	},
+
+	URLHelper : {
+
+		getBaseHash : function(){
+			// Guard against a missing hash, which would otherwise yield "#undefined".
+			var baseHash = "#" + (document.location.hash.split("#")[1] || "");
+			var split = baseHash.split("/");
+			if(split.length>0){
+				return split[0];
+			}
+			return "";
+		},
+
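+		// Appends an encoded suffix to the current base hash. Illustrative
+		// example (assuming location.hash is "#requests"):
+		//   URLHelper.setUrlHash({id: 42}); // -> "#requests/%7B%22id%22%3A42%7D"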
+		setUrlHash : function(newAddition){
+			if(!newAddition){
+				newAddition = "";
+			}
+			if(newAddition != "" && typeof newAddition === "object"){
+				newAddition = JSON.stringify(newAddition);
+			}
+			var hash = document.location.hash;
+			var baseHash = this.getBaseHash();
+			if(!hash.startsWith(baseHash)){
+				return;
+			}
+			document.location.hash = baseHash + "/" + encodeURIComponent(newAddition);
+		}
+	}
+};
+
+module.exports = Utils;
diff --git a/odf/odf-web/src/main/webapp/swagger/index.html b/odf/odf-web/src/main/webapp/swagger/index.html
new file mode 100755
index 0000000..4eb6ff1
--- /dev/null
+++ b/odf/odf-web/src/main/webapp/swagger/index.html
@@ -0,0 +1,142 @@
+
+<!DOCTYPE html>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<html>
+<head>
+  <meta charset="UTF-8">
+  <title>Swagger UI</title>
+  <link rel="icon" type="image/png" href="images/favicon-32x32.png" sizes="32x32" />
+  <link rel="icon" type="image/png" href="images/favicon-16x16.png" sizes="16x16" />
+  <link href='css/typography.css' media='screen' rel='stylesheet' type='text/css'/>
+  <link href='css/reset.css' media='screen' rel='stylesheet' type='text/css'/>
+  <link href='css/screen.css' media='screen' rel='stylesheet' type='text/css'/>
+  <link href='css/reset.css' media='print' rel='stylesheet' type='text/css'/>
+  <link href='css/print.css' media='print' rel='stylesheet' type='text/css'/>
+  <script src='lib/jquery-1.8.0.min.js' type='text/javascript'></script>
+  <script src='lib/jquery.slideto.min.js' type='text/javascript'></script>
+  <script src='lib/jquery.wiggle.min.js' type='text/javascript'></script>
+  <script src='lib/jquery.ba-bbq.min.js' type='text/javascript'></script>
+  <script src='lib/handlebars-2.0.0.js' type='text/javascript'></script>
+  <script src='lib/underscore-min.js' type='text/javascript'></script>
+  <script src='lib/backbone-min.js' type='text/javascript'></script>
+  <script src='swagger-ui.js' type='text/javascript'></script>
+  <script src='lib/highlight.7.3.pack.js' type='text/javascript'></script>
+  <script src='lib/jsoneditor.min.js' type='text/javascript'></script>
+  <script src='lib/marked.js' type='text/javascript'></script>
+  <script src='lib/swagger-oauth.js' type='text/javascript'></script>
+
+  <!-- Some basic translations -->
+  <!-- <script src='lang/translator.js' type='text/javascript'></script> -->
+  <!-- <script src='lang/ru.js' type='text/javascript'></script> -->
+  <!-- <script src='lang/en.js' type='text/javascript'></script> -->
+
+  <script type="text/javascript">
+    $(function () {
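+      // The spec location can be overridden via a "?url=..." query parameter;
+      // otherwise the swagger.json served next to this page is used.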
+      var url = window.location.search.match(/url=([^&]+)/);
+      if (url && url.length > 1) {
+        url = decodeURIComponent(url[1]);
+      } else {
+        url = "swagger.json";
+      }
+
+      // Pre load translate...
+      if(window.SwaggerTranslator) {
+        window.SwaggerTranslator.translate();
+      }
+      window.swaggerUi = new SwaggerUi({
+        url: url,
+        validatorUrl: null,
+        dom_id: "swagger-ui-container",
+        supportedSubmitMethods: ['get', 'post', 'put', 'delete', 'patch'],
+        onComplete: function(swaggerApi, swaggerUi){
+          if(typeof initOAuth == "function") {
+            initOAuth({
+              clientId: "your-client-id",
+              clientSecret: "your-client-secret-if-required",
+              realm: "your-realms",
+              appName: "your-app-name",
+              scopeSeparator: ",",
+              additionalQueryStringParams: {}
+            });
+          }
+
+          if(window.SwaggerTranslator) {
+            window.SwaggerTranslator.translate();
+          }
+
+          $('pre code').each(function(i, e) {
+            hljs.highlightBlock(e);
+          });
+
+          addApiKeyAuthorization();
+        },
+        onFailure: function(data) {
+          log("Unable to load Swagger UI");
+        },
+        docExpansion: "none",
+        jsonEditor: false,
+        apisSorter: "alpha",
+        defaultModelRendering: 'schema',
+        showRequestHeaders: false
+      });
+
+      function addApiKeyAuthorization(){
+        var key = encodeURIComponent($('#input_apiKey')[0].value);
+        if(key && key.trim() != "") {
+            var apiKeyAuth = new SwaggerClient.ApiKeyAuthorization("api_key", key, "query");
+            window.swaggerUi.api.clientAuthorizations.add("api_key", apiKeyAuth);
+            log("added key " + key);
+        }
+      }
+
+      $('#input_apiKey').change(addApiKeyAuthorization);
+
+      // if you have an apiKey you would like to pre-populate on the page for demonstration purposes...
+      /*
+        var apiKey = "myApiKeyXXXX123456789";
+        $('#input_apiKey').val(apiKey);
+      */
+
+      window.swaggerUi.load();
+
+      function log() {
+        if ('console' in window) {
+          console.log.apply(console, arguments);
+        }
+      }
+    });
+  </script>
+</head>
+
+<body class="swagger-section">
+<div id='header'>
+  <div class="swagger-ui-wrap">
+    <a id="logo" href="http://swagger.io">swagger</a>
+    <form id='api_selector'>
+      <div class='input'><input placeholder="http://example.com/api" id="input_baseUrl" name="baseUrl" type="text"/></div>
+      <div class='input'><input placeholder="api_key" id="input_apiKey" name="apiKey" type="text"/></div>
+      <div class='input'><a id="explore" href="#" data-sw-translate>Explore</a></div>
+    </form>
+  </div>
+</div>
+
+<div id="message-bar" class="swagger-ui-wrap" data-sw-translate>&nbsp;</div>
+<div id="swagger-ui-container" class="swagger-ui-wrap"></div>
+</body>
+</html>
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java
new file mode 100755
index 0000000..6f23c0d
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/EngineResourceTest.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.admin;
+
+import java.io.InputStream;
+import java.util.Collection;
+
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.engine.ServiceRuntimeInfo;
+import org.apache.atlas.odf.api.engine.ServiceRuntimesInfo;
+import org.apache.atlas.odf.api.engine.SystemHealth;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class EngineResourceTest extends RestTestBase {
+
+	@Test
+	public void testHealth() throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/health");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Health check request returned: " + s);
+		checkResult(httpResp, HttpStatus.SC_OK);
+		SystemHealth health = JSONUtils.fromJSON(s, SystemHealth.class);
+		Assert.assertNotNull(health);
+	}
+	
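+	// Returns true if the given collection contains a runtime with the given name.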
+	boolean containsRuntimeWithName(Collection<ServiceRuntimeInfo> runtimes, String name) {
+		for (ServiceRuntimeInfo sri : runtimes) {
+			if (name.equals(sri.getName())) {
+				return true;
+			}
+		}
+		return false;
+	}
+	
+	@Test
+	public void testRuntimesInfo() throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/runtimes");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Runtime Info returned: " + s);
+		checkResult(httpResp, HttpStatus.SC_OK);
+		ServiceRuntimesInfo sri = JSONUtils.fromJSON(s, ServiceRuntimesInfo.class);
+		Assert.assertNotNull(sri);
+		Assert.assertTrue(sri.getRuntimes().size() > 2);
+		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "Java"));
+		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "Spark"));
+		Assert.assertTrue(containsRuntimeWithName(sri.getRuntimes(), "HealthCheck"));
+
+	}
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java
new file mode 100755
index 0000000..d093a73
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/admin/SettingsResourceTest.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.admin;
+
+import org.apache.atlas.odf.api.settings.MessagingConfiguration;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpStatus;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.settings.KafkaMessagingConfiguration;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class SettingsResourceTest extends RestTestBase {
+
+	@Test
+	public void testSettingsRead() throws Exception {
+		ODFSettings settings = settingsRead();
+		Assert.assertNotNull(settings);
+		MessagingConfiguration msgConfig = settings.getMessagingConfiguration();
+		Assert.assertNotNull(msgConfig);
+		Assert.assertTrue(msgConfig instanceof KafkaMessagingConfiguration);
+		KafkaMessagingConfiguration kafkaMsgConfig = (KafkaMessagingConfiguration) msgConfig;
+		Assert.assertNotNull(kafkaMsgConfig.getKafkaConsumerConfig());
+		Assert.assertNotNull(kafkaMsgConfig.getKafkaConsumerConfig().getZookeeperConnectionTimeoutMs());
+
+		Assert.assertNotNull(settings.getUserDefined());
+	}
+
+	@Test
+	public void testPasswordEncryption() throws Exception {
+		ODFSettings settings = settingsRead();
+		settings.setOdfPassword("newOdfPassword");
+		ODFSettings configWithPlainPasswords = settings;
+		settingsWrite(JSONUtils.toJSON(configWithPlainPasswords), HttpStatus.SC_OK);
+		logger.info("Settings with plain password: " + JSONUtils.toJSON(configWithPlainPasswords));
+
+		// REST API must return hidden password
+		ODFSettings configWithHiddenPasswords = settingsRead();
+		String hiddenPasswordIdentifier = "***hidden***";
+		Assert.assertEquals(hiddenPasswordIdentifier, configWithHiddenPasswords.getOdfPassword());
+
+		// Reset passwords
+		Assert.assertNotNull(System.getProperty("odf.test.password"));
+		settings = settingsRead();
+		settings.setOdfPassword(Encryption.decryptText(System.getProperty("odf.test.password")));
+		settingsWrite(JSONUtils.toJSON(settings), HttpStatus.SC_OK);
+	}
+
+	@Test
+	public void testSettingsWriteSuccess() throws Exception {
+		String configSnippet = "{ \"runAnalysisOnImport\": false }";
+		logger.info("Testing write settings success with JSON: " + configSnippet);
+		settingsWrite(configSnippet, HttpStatus.SC_OK);
+	}
+	
+	@Test
+	public void testSettingsWriteFailure() throws Exception {
+		String configSnippet = "{ \"runAnalysisOnImport\": \"someInvalidValue\" }";
+		logger.info("Testing write settings failure with JSON: " + configSnippet);
+		settingsWrite(configSnippet, HttpStatus.SC_INTERNAL_SERVER_ERROR);
+	}
+
+	@Test
+	public void testSettingsReset() throws Exception {
+		logger.info("Testing reset settings operation.");
+		String updatedId = "updatedInstanceId";
+		ODFSettings originalConfig = settingsRead();
+		String originalInstanceId = originalConfig.getInstanceId();
+		originalConfig.setInstanceId(updatedId);
+
+		settingsWrite(JSONUtils.toJSON(originalConfig), HttpStatus.SC_OK);
+		
+		ODFSettings newConfig = settingsRead();
+		Assert.assertEquals(updatedId, newConfig.getInstanceId());
+
+		settingsReset();
+
+		ODFSettings resetConfig = settingsRead();
+		String resetInstanceId = resetConfig.getInstanceId();
+
+		Assert.assertEquals(originalInstanceId, resetInstanceId);
+	}
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java
new file mode 100755
index 0000000..21b7887
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/analysis/test/ODFVersionTest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.analysis.test;
+
+import java.io.InputStream;
+
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class ODFVersionTest extends RestTestBase {
+
+	@Test
+	public void testVersion() throws Exception {
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/engine/version");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Version request returned: " + s);
+
+		JSONObject jo = new JSONObject(s);
+		String version = jo.getString("version");
+		Assert.assertNotNull(version);
+		Assert.assertTrue(version.startsWith("1.2.0-"));
+	}
+
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java
new file mode 100755
index 0000000..4900d63
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/annotations/AnnotationsResourceTest.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.annotations;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.annotation.Annotations;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.integrationtest.metadata.MetadataResourceTest;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.apache.http.entity.ContentType;
+import org.apache.wink.json4j.JSON;
+import org.apache.wink.json4j.JSONObject;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.models.ProfilingAnnotation;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class AnnotationsResourceTest extends RestTestBase {
+	Logger logger = Logger.getLogger(AnnotationsResourceTest.class.getName());
+
+	@Before
+	public void createSampleData() throws Exception {
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		checkResult(httpResp, HttpStatus.SC_OK);
+	}
+
+	public static class AnnotationsResourceTestProfilingAnnotation extends ProfilingAnnotation {
+		private String newAnnotProp;
+
+		public String getNewAnnotProp() {
+			return newAnnotProp;
+		}
+
+		public void setNewAnnotProp(String newAnnotProp) {
+			this.newAnnotProp = newAnnotProp;
+		}
+
+	}
+
+	static String newAnnotPropValue = "newAnnotPropValue" + UUID.randomUUID().toString();
+	static String newAnnotPropKey = "newAnnotProp";
+
+	static String unknownAnnotType = "UnknownAnnotType" + UUID.randomUUID().toString();
+
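+	// Creates one subclassed and one generic ProfilingAnnotation for the given
+	// asset reference, both tagged with the analysis request id so they can be
+	// identified on retrieval.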
+	List<Annotation> createTestAnnotations(MetaDataObjectReference ref, String reqId) {
+		List<Annotation> result = new ArrayList<>();
+		AnnotationsResourceTestProfilingAnnotation annot = new AnnotationsResourceTestProfilingAnnotation();
+		annot.setProfiledObject(ref);
+		annot.setNewAnnotProp(newAnnotPropValue);
+		annot.setAnalysisRun(reqId);
+		result.add(annot);
+
+		ProfilingAnnotation genericAnnot = new ProfilingAnnotation();
+		genericAnnot.setProfiledObject(ref);
+		genericAnnot.setAnalysisRun(reqId);
+		genericAnnot.setJsonProperties("{\"" + newAnnotPropKey + "\": \"" + newAnnotPropValue + "\"}");
+		result.add(genericAnnot);
+
+		return result;
+	}
+
+	MetaDataObjectReference getTestDataSetRef() throws Exception {
+		String s = MetadataResourceTest.getAllMetadataObjectsOfType("DataFile");
+		logger.info("Retrieved test data set refs: " + s);
+		List<DataFile> dfRefs = JSONUtils.fromJSONList(s, DataFile.class);
+		return dfRefs.get(0).getReference();
+	}
+
+	@Test
+	public void testAnnotationStore() throws Exception {
+		MetaDataObjectReference dfRef = getTestDataSetRef();
+		String reqId = "TestRequestId" + UUID.randomUUID().toString();
+		logger.info("Testing annotation store with request ID: " + reqId);
+		List<Annotation> newAnnots = createTestAnnotations(dfRef, reqId);
+
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		List<String> createdAnnotIds = new ArrayList<>();
+		// create annotations
+		for (Annotation annot : newAnnots) {
+			String restRequestBody = JSONUtils.toJSON(annot);
+			logger.info("Creating annotation via request " + restRequestBody);
+			Request req = Request.Post(getBaseURI() + "/annotations").bodyString(restRequestBody, ContentType.APPLICATION_JSON);
+			Response resp = exec.execute(req);
+			HttpResponse httpResp = resp.returnResponse();
+			checkResult(httpResp, HttpStatus.SC_CREATED);
+			InputStream is = httpResp.getEntity().getContent();
+			MetaDataObjectReference createdAnnot = JSONUtils.fromJSON(is, MetaDataObjectReference.class);
+			Assert.assertNotNull(createdAnnot);
+			Assert.assertNotNull(createdAnnot.getId());
+			createdAnnotIds.add(createdAnnot.getId());
+		}
+		logger.info("Annotations created, now retrieving them again: " + createdAnnotIds);
+
+		// check retrieval
+		Request req = Request.Get(getBaseURI() + "/annotations?assetReference=" + dfRef.getId());
+		Response resp = exec.execute(req);
+
+		HttpResponse httpResp = resp.returnResponse();
+		checkResult(httpResp, HttpStatus.SC_OK);
+		Annotations retrieveResult = JSONUtils.fromJSON(httpResp.getEntity().getContent(), Annotations.class);
+		List<Annotation> retrievedAnnots = retrieveResult.getAnnotations();
+		logger.info("Retrieved annotations: " + retrievedAnnots);
+		int foundAnnots = 0;
+		for (Annotation retrievedAnnot : retrievedAnnots) {
+			logger.info("Checking annotation: " + retrievedAnnot.getReference());
+			logger.info("Annotation " + retrievedAnnot.getReference().getId() + " has request ID: " + retrievedAnnot.getAnalysisRun());
+			if (reqId.equals(retrievedAnnot.getAnalysisRun())) {
+				logger.info("Checking annotation " + retrievedAnnot + " of class " + retrievedAnnot.getClass());
+				Assert.assertTrue(retrievedAnnot instanceof ProfilingAnnotation);
+
+				if (retrievedAnnot instanceof AnnotationsResourceTestProfilingAnnotation) {
+					AnnotationsResourceTestProfilingAnnotation tpa = (AnnotationsResourceTestProfilingAnnotation) retrievedAnnot;
+					Assert.assertEquals(dfRef, tpa.getProfiledObject());
+					Assert.assertEquals(newAnnotPropValue, tpa.getNewAnnotProp());
+				} else {
+					// other annotations are of "unknown" types and are therefore deserialized as plain ProfilingAnnotation, not a subclass
+					Assert.assertTrue(retrievedAnnot.getClass().equals(ProfilingAnnotation.class));
+					
+					String jsonProps = retrievedAnnot.getJsonProperties();
+					Assert.assertNotNull(jsonProps);
+					JSONObject jo = (JSONObject) JSON.parse(jsonProps);
+					Assert.assertTrue(jo.containsKey(newAnnotPropKey));
+					Assert.assertEquals(newAnnotPropValue, jo.getString(newAnnotPropKey));
+				}
+				Assert.assertTrue(createdAnnotIds.contains(retrievedAnnot.getReference().getId()));
+				foundAnnots++;
+				
+				// check that retrieval by Id works
+				logger.info("Retrieving annotation " + retrievedAnnot.getReference().getId() + " again");
+				String url = getBaseURI() + "/annotations/objects/" + retrievedAnnot.getReference().getId();
+				logger.info("Retrieving annotation with URL: " + url);
+				Request req1 = Request.Get(url);
+				Response resp1 = exec.execute(req1);
+
+				HttpResponse httpResp1 = resp1.returnResponse();
+				checkResult(httpResp1, HttpStatus.SC_OK);
+				Annotation newRetrievedAnnot = JSONUtils.fromJSON(httpResp1.getEntity().getContent(), Annotation.class);
+				Assert.assertEquals(retrievedAnnot.getReference(), newRetrievedAnnot.getReference());
+				Assert.assertEquals(retrievedAnnot.getClass(), newRetrievedAnnot.getClass());
+				Assert.assertEquals(retrievedAnnot.getJsonProperties(), newRetrievedAnnot.getJsonProperties());
+			}
+		}
+		Assert.assertEquals(createdAnnotIds.size(), foundAnnots);
+
+	}
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java
new file mode 100755
index 0000000..d76a272
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/MetadataResourceTest.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.metadata;
+
+import java.io.InputStream;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.apache.http.client.utils.URIBuilder;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.api.metadata.models.BusinessTerm;
+import org.apache.atlas.odf.api.metadata.models.DataFile;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class MetadataResourceTest extends RestTestBase {
+
+	static Logger logger = Logger.getLogger(MetadataResourceTest.class.getName());
+
+	@Before
+	public void createSampleData() throws Exception {
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		checkResult(httpResp, HttpStatus.SC_OK);
+	}
+
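+	// Builds a search query for the given object type using a local metadata
+	// store's query builder, executes it against the /metadata/search REST
+	// endpoint, and returns the raw JSON result list.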
+	public static String getAllMetadataObjectsOfType(String dataType) throws Exception {
+		MetadataStore mdsForQueryGeneration = new ODFFactory().create().getMetadataStore();
+		String query = mdsForQueryGeneration.newQueryBuilder().objectType(dataType).build();
+		logger.info("Metadata search query metadata " + query);
+
+		URIBuilder builder = new URIBuilder(getBaseURI() + "/metadata/search").addParameter("query", query);
+		String uri = builder.build().toString();
+		logger.info("Searching against URL: " + uri);
+		Request req = Request.Get(uri);
+		Response response = getRestClientManager().getAuthenticatedExecutor().execute(req);
+		HttpResponse httpResp = response.returnResponse();
+		Assert.assertEquals(HttpStatus.SC_OK, httpResp.getStatusLine().getStatusCode());
+		InputStream is = httpResp.getEntity().getContent();
+		String s = JSONUtils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Response: " + s);
+		return s;
+	}
+
+	@Test
+	public void testMetadataResourceSearchOMDataFile() throws Exception {
+		String s = getAllMetadataObjectsOfType("DataFile");
+		Assert.assertTrue(s.contains("DataFile")); // minimal checking that JSON contains something useful and specific to this type
+		JSONUtils.fromJSONList(s, DataFile.class);
+	}
+
+	@Test
+	public void testMetadataResourceSearchOMBusinessTerm() throws Exception {
+		String s = getAllMetadataObjectsOfType("BusinessTerm");
+		Assert.assertTrue(s.contains("BusinessTerm")); // minimal checking that JSON contains something useful and specific to this type
+		JSONUtils.fromJSONList(s, BusinessTerm.class);
+	}
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java
new file mode 100755
index 0000000..c70c500
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/metadata/RemoteMetadataStoreTest.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.metadata;
+
+import java.net.URISyntaxException;
+import java.util.Properties;
+
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.core.integrationtest.metadata.MetadataStoreTestBase;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.MetaDataObjectReference;
+import org.apache.atlas.odf.api.metadata.MetadataQueryBuilder;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.MetadataStoreException;
+import org.apache.atlas.odf.api.metadata.RemoteMetadataStore;
+
+public class RemoteMetadataStoreTest extends MetadataStoreTestBase {
+
+	protected MetadataStore getMetadataStore() {
+		RemoteMetadataStore rms = null;
+		try {
+			rms = new RemoteMetadataStore(RestTestBase.getOdfBaseUrl(), RestTestBase.getOdfUser(), Encryption.decryptText(RestTestBase.getOdfPassword()), true);
+		} catch (MetadataStoreException | URISyntaxException e) {
+			throw new RuntimeException("Error connecting to remote metadata store.", e);
+		}
+		return rms;
+	}
+
+	//TODO: Remove all methods below this comment once the DefaultMetadataStore is queue-based (issue #122)
+	// RemoteMetadataStore will then use the exact same test cases as the other (writable) metadata stores 
+
+	@Before
+	public void createSampleData() {
+		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
+		MetadataStore mds = getMetadataStore();
+		mds.resetAllData();
+		mds.createSampleData();
+	}
+
+	@Test
+	public void testProperties() throws Exception {
+		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
+		RemoteMetadataStore rms = new RemoteMetadataStore(RestTestBase.getOdfBaseUrl(), RestTestBase.getOdfUser(), Encryption.decryptText(RestTestBase.getOdfPassword()), true);
+		Properties props = rms.getProperties();
+		Assert.assertNotNull(props);
+		Assert.assertTrue(!props.isEmpty());
+	}
+
+	@Test
+	public void testReferences() throws Exception {
+		//TODO: Do not overwrite original method once DefaultMetadataStore is queue-based
+		MetadataStore mds = getMetadataStore();
+		MetadataStoreTestBase.checkReferences(mds, MetadataStoreTestBase.getDataFileTestObject(mds));
+	}
+
+	@Test
+	public void testSearchAndRetrieve() {
+		//TODO: Do not overwrite original method once DefaultMetadataStore is queue-based
+
+		// Test retrieve
+		MetadataStore mds = getMetadataStore();
+		MetaDataObjectReference bankClientsShortRef = mds.search(mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build()).get(0);
+		Assert.assertEquals("The metadata store did not retrieve the object with the expected name.", "BankClientsShort", mds.retrieve(bankClientsShortRef).getName());
+
+		// Test queries with conditions
+		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
+		checkQueryResults(mds, new String[] { "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.NOT_EQUALS, "BankClientsShort").build(), false);
+		checkQueryResults(mds, new String[] { "NAME" },
+				mds.newQueryBuilder().objectType("Column").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "NAME").simpleCondition("dataType", MetadataQueryBuilder.COMPARATOR.EQUALS, "string").build(), false);
+
+		// Test type hierarchy
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("DataFile").build(), true);
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable" }, mds.newQueryBuilder().objectType("RelationalDataSet").build(), true);
+		checkQueryResults(mds, new String[] { "BankClientsShort", "SimpleExampleTable", "Simple URL example document", "Simple local example document" }, mds.newQueryBuilder().objectType("DataSet").build(), false);
+		checkQueryResults(mds, new String[] { "BankClientsShort" }, mds.newQueryBuilder().objectType("MetaDataObject").simpleCondition("name", MetadataQueryBuilder.COMPARATOR.EQUALS, "BankClientsShort").build(), false);
+	}
+
+	@Test
+	public void testAnnotations() {
+		//TODO: Remove this method once the DefaultMetadataStore is queue-based (issue #122)
+	}
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java
new file mode 100755
index 0000000..d7bbc0f
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/integrationtest/spark/SparkDiscoveryServiceWebTest.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.integrationtest.spark;
+
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.metadata.MetadataStore;
+import org.apache.atlas.odf.api.metadata.models.Annotation;
+import org.apache.atlas.odf.api.metadata.models.RelationalDataSet;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.rest.test.RestTestBase;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.apache.wink.json4j.JSONException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.atlas.odf.api.metadata.RemoteMetadataStore;
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.api.ODFFactory;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.annotation.AnnotationStore;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceProperties;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint;
+import org.apache.atlas.odf.api.discoveryservice.DiscoveryServiceSparkEndpoint.SERVICE_INTERFACE_TYPE;
+import org.apache.atlas.odf.core.integrationtest.metadata.internal.spark.SparkDiscoveryServiceLocalTest;
+import org.apache.atlas.odf.core.integrationtest.metadata.internal.spark.SparkDiscoveryServiceLocalTest.DATASET_TYPE;
+import org.apache.atlas.odf.api.settings.SparkConfig;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class SparkDiscoveryServiceWebTest extends RestTestBase {
+	protected static Logger logger = Logger.getLogger(SparkDiscoveryServiceWebTest.class.getName());
+
+	@Before
+	public void createSampleData() throws Exception {
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(getBaseURI() + "/metadata/sampledata");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		checkResult(httpResp, HttpStatus.SC_OK);
+	}
+
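+	// Registration payload for the example Spark discovery service, pointing
+	// the Spark endpoint at the sample summary-statistics application jar.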
+	public static DiscoveryServiceProperties getSparkSummaryStatisticsService() throws JSONException {
+		DiscoveryServiceProperties dsProperties = new DiscoveryServiceProperties();
+		dsProperties.setId(SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
+		dsProperties.setName("Spark summary statistics service");
+		dsProperties.setDescription("Example discovery service calling summary statistics Spark application");
+		dsProperties.setIconUrl("spark.png");
+		dsProperties.setLink("http://www.spark.apache.org");
+		dsProperties.setPrerequisiteAnnotationTypes(null);
+		dsProperties.setResultingAnnotationTypes(null);
+		dsProperties.setSupportedObjectTypes(null);
+		dsProperties.setAssignedObjectTypes(null);
+		dsProperties.setAssignedObjectCandidates(null);
+		dsProperties.setParallelismCount(2);
+		DiscoveryServiceSparkEndpoint endpoint = new DiscoveryServiceSparkEndpoint();
+		endpoint.setJar("file:///tmp/odf-spark/odf-spark-example-application-1.2.0-SNAPSHOT.jar");
+		endpoint.setInputMethod(SERVICE_INTERFACE_TYPE.DataFrame);
+		endpoint.setClassName("org.apache.atlas.odf.core.spark.SummaryStatistics");
+		dsProperties.setEndpoint(JSONUtils.convert(endpoint, DiscoveryServiceEndpoint.class));
+		return dsProperties;
+	}
+
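+	// End-to-end flow: writes the Spark configuration into the ODF settings,
+	// (re)registers the discovery service, runs an analysis on a test data set,
+	// and verifies that annotations were created.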
+	public void runSparkServiceTest(SparkConfig sparkConfig, DATASET_TYPE dataSetType, DiscoveryServiceProperties regInfo, String[] annotationNames) throws Exception{
+		logger.log(Level.INFO, "Testing spark application on ODF webapp url {0}.", getOdfBaseUrl());
+
+		logger.info("Using Spark configuration: " + JSONUtils.toJSON(sparkConfig));
+		ODFSettings settings = settingsRead();
+		settings.setSparkConfig(sparkConfig);
+		settings.setOdfUrl(Utils.getSystemPropertyExceptionIfMissing("odf.test.webapp.url"));
+		settingsWrite(JSONUtils.toJSON(settings), HttpStatus.SC_OK);
+
+		logger.log(Level.INFO, "Trying to delete existing discovery service: " + SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
+		deleteService(SparkDiscoveryServiceLocalTest.DISCOVERY_SERVICE_ID);
+
+		logger.info("Using discovery service: " + JSONUtils.toJSON(regInfo));
+		createService(JSONUtils.toJSON(regInfo), HttpStatus.SC_OK);
+
+		checkServiceExists(regInfo.getId());
+
+		MetadataStore mds = new RemoteMetadataStore(getOdfBaseUrl(), getOdfUser(), Encryption.decryptText(getOdfPassword()), true);
+		Assert.assertNotNull(mds);
+
+
+		RelationalDataSet dataSet = null;
+		if (dataSetType == DATASET_TYPE.FILE) {
+			dataSet = SparkDiscoveryServiceLocalTest.getTestDataFile(mds);
+		} else if (dataSetType == DATASET_TYPE.TABLE) {
+			dataSet = SparkDiscoveryServiceLocalTest.getTestTable(mds);
+		} else {
+			Assert.fail("Unsupported data set type: " + dataSetType);
+		}
+		logger.info("Using dataset: " + JSONUtils.toJSON(dataSet));
+
+		AnnotationStore as = new ODFFactory().create().getAnnotationStore();
+
+		AnalysisRequest request = SparkDiscoveryServiceLocalTest.getSparkAnalysisRequest(dataSet);
+		logger.info("Using analysis request: " + JSONUtils.toJSON(request));
+
+		logger.info("Starting analysis...");
+		String requestId = runAnalysis(request, State.FINISHED);
+
+		List<Annotation> annots = as.getAnnotations(null, requestId);
+		logger.info("Number of annotations created: " + annots.size());
+		Assert.assertTrue("No annotations have been created.", annots.size() > 0);
+	}
+
+	@Test
+	public void testSparkServiceRESTAPI() throws Exception{
+		runSparkServiceTest(SparkDiscoveryServiceLocalTest.getLocalSparkConfig(), DATASET_TYPE.FILE, getSparkSummaryStatisticsService(), new String[] { "SparkSummaryStatisticsAnnotation", "SparkTableAnnotation" });
+	}
+
+}
diff --git a/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java b/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java
new file mode 100755
index 0000000..e23dd4e
--- /dev/null
+++ b/odf/odf-web/src/test/java/org/apache/atlas/odf/rest/test/RestTestBase.java
@@ -0,0 +1,289 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.atlas.odf.rest.test;
+
+import java.io.InputStream;
+import java.net.URI;
+import java.text.MessageFormat;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.client.fluent.Executor;
+import org.apache.http.client.fluent.Request;
+import org.apache.http.client.fluent.Response;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.entity.ContentType;
+import org.apache.http.message.BasicHeader;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+import org.apache.atlas.odf.core.Encryption;
+import org.apache.atlas.odf.core.Utils;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestTrackers;
+import org.apache.atlas.odf.api.analysis.AnalysisRequest;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestSummary;
+import org.apache.atlas.odf.api.analysis.AnalysisResponse;
+import org.apache.atlas.odf.api.annotation.Annotations;
+import org.apache.atlas.odf.api.analysis.AnalysisRequestStatus.State;
+import org.apache.atlas.odf.api.connectivity.RESTClientManager;
+import org.apache.atlas.odf.api.settings.ODFSettings;
+import org.apache.atlas.odf.api.utils.ODFLogConfig;
+import org.apache.atlas.odf.core.test.TestEnvironment;
+import org.apache.atlas.odf.json.JSONUtils;
+
+public class RestTestBase {
+
+	protected static Logger logger = Logger.getLogger(RestTestBase.class.getName());
+
+	@BeforeClass
+	public static void setup() throws Exception {
+		ODFLogConfig.run();
+		TestEnvironment.startMessaging();
+	}
+	
+	protected static void checkResult(HttpResponse httpResponse, int expectedCode) {
+		StatusLine sl = httpResponse.getStatusLine();
+		int code = sl.getStatusCode();
+		logger.info("Http request returned: " + code + ", message: " + sl.getReasonPhrase());
+		Assert.assertEquals(expectedCode, code);
+	}
+
+	public static RESTClientManager getRestClientManager() {
+		return new RESTClientManager(URI.create(getOdfUrl()), getOdfUser(), Encryption.decryptText(getOdfPassword()));
+	}
+
+	public static String getOdfBaseUrl() {
+		String odfBaseURL = System.getProperty("odf.test.base.url");
+		return odfBaseURL;
+	}
+
+	public static String getOdfUrl() {
+		String odfURL = System.getProperty("odf.test.webapp.url");
+		return odfURL;
+	}
+
+	public static String getOdfUser() {
+		String odfUser = System.getProperty("odf.test.user");
+		return odfUser;
+	}
+
+	public static String getOdfPassword() {
+		String odfPassword = System.getProperty("odf.test.password");
+		return odfPassword;
+	}
+
+	public static String getBaseURI() {
+		return getOdfBaseUrl() + "/odf/api/v1";
+	}
+
+	public String runAnalysis(AnalysisRequest request, State expectedState) throws Exception {
+		Executor exec = getRestClientManager().getAuthenticatedExecutor();
+		String json = JSONUtils.toJSON(request);
+		logger.info("Starting analysis via POST request: " + json);
+
+		Header header = new BasicHeader("Content-Type", "application/json");
+		Request req = Request.Post(getBaseURI() + "/analyses").bodyString(json, ContentType.APPLICATION_JSON).addHeader(header);
+
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		checkResult(httpResp, HttpStatus.SC_OK);
+
+		InputStream is = httpResp.getEntity().getContent();
+		String jsonResponse = JSONUtils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Analysis response: " + jsonResponse);
+		AnalysisResponse analysisResponse = JSONUtils.fromJSON(jsonResponse, AnalysisResponse.class);
+		Assert.assertNotNull(analysisResponse);
+		String requestId = analysisResponse.getId();
+		Assert.assertNotNull(requestId);
+		logger.info("Request Id: " + requestId);
+
+		Assert.assertTrue(! analysisResponse.isInvalidRequest());
+		
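+		// Poll the status endpoint (at most maxPolls times, once per second)
+		// until the request leaves the ACTIVE/QUEUED states.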
+		AnalysisRequestStatus status = null;
+		int maxPolls = 400;
+		do {
+			Request statusRequest = Request.Get(getBaseURI() + "/analyses/" + requestId);
+			logger.info("Getting analysis status");
+			resp = exec.execute(statusRequest);
+			httpResp = resp.returnResponse();
+			checkResult(httpResp, HttpStatus.SC_OK);
+
+			String statusResponse = JSONUtils.getInputStreamAsString(httpResp.getEntity().getContent(), "UTF-8");
+			logger.info("Analysis status: " + statusResponse);
+			status = JSONUtils.fromJSON(statusResponse, AnalysisRequestStatus.class);
+
+			logger.log(Level.INFO, "Poll request for request ID ''{0}'' returned state ''{1}'' (expected: ''{3}''), details: ''{2}''", new Object[] { requestId, status.getState(), status.getDetails(), expectedState });
+			maxPolls--;
+			Thread.sleep(1000);
+		} while (maxPolls > 0 && (status.getState() == State.ACTIVE || status.getState() == State.QUEUED));
+		Assert.assertEquals(expectedState, status.getState());
+		return requestId;
+	}
+
+	public void createService(String serviceJSON, int expectedCode) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+
+		Request req = Request.Post(RestTestBase.getBaseURI() + "/services")//
+				.bodyString(serviceJSON, ContentType.APPLICATION_JSON) //
+		.addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Create service request return code: " + httpResp.getStatusLine().getStatusCode() + ", content: " + s);
+		checkResult(httpResp, expectedCode);
+	}
+	
+	public void checkServiceExists(String serviceId) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/services/" + serviceId).addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Get service request return code: " + httpResp.getStatusLine().getStatusCode() + ", content: " + s);
+		checkResult(httpResp, 200);
+		
+	}
+
+	public void deleteService(String serviceId, int expectedCode) throws Exception {
+		checkResult(this.deleteService(serviceId), expectedCode);
+	}
+
+	public HttpResponse deleteService(String serviceId) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+		URIBuilder uri = new URIBuilder(RestTestBase.getBaseURI() + "/services/" + serviceId + "/cancel");
+		Request req = Request.Post(uri.build())//
+				.addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Delete service request returned: " + s);
+		return httpResp;
+	}
+
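+	/**
+	 * Reads the current ODF settings via GET /settings and deserializes the JSON response.
+	 */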
+	public ODFSettings settingsRead() throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/settings");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Settings read request returned: " + s);
+		is.close();
+		checkResult(httpResp, HttpStatus.SC_OK);
+		return JSONUtils.fromJSON(s, ODFSettings.class);
+	}
+
+	public void settingsWrite(String configSnippet, int expectedCode) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+
+		Request req = Request.Put(RestTestBase.getBaseURI() + "/settings")//
+				.bodyString(configSnippet, ContentType.APPLICATION_JSON) //
+				.addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Settings write request returned: " + s);
+		checkResult(httpResp, expectedCode);
+	}
+
+	public void settingsReset() throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+		Request req = Request.Post(RestTestBase.getBaseURI() + "/settings/reset")//
+				.addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Config reset request returned: " + s);
+		checkResult(httpResp, HttpStatus.SC_OK);
+	}
+
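+	/**
+	 * Cancels a running analysis request via POST /analyses/{requestId}/cancel
+	 * and asserts the expected HTTP status code.
+	 */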
+	public void cancelAnalysisRequest(String requestId, int expectedCode) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Header header = new BasicHeader("Content-Type", "application/json");
+
+		Request req = Request.Post(RestTestBase.getBaseURI() + "/analyses/" + requestId + "/cancel").addHeader(header);
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		is.close();
+		logger.info("Cancel analyses request returned: " + s);
+		checkResult(httpResp, expectedCode);
+	}
+
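+	/**
+	 * Retrieves a page of analysis request trackers via GET /analyses?offset=...&limit=... .
+	 */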
+	public AnalysisRequestTrackers getAnalysesRequests(int offset, int limit) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(MessageFormat.format("{0}/analyses?offset={1}&limit={2}", RestTestBase.getBaseURI(), offset, limit));
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Analyses read request returned: " + s);
+		is.close();
+		checkResult(httpResp, HttpStatus.SC_OK);
+		return JSONUtils.fromJSON(s, AnalysisRequestTrackers.class);
+	}
+
+	public AnalysisRequestSummary getAnalysesStats() throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		Request req = Request.Get(RestTestBase.getBaseURI() + "/analyses/stats");
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Analyses statistics request returned: " + s);
+		is.close();
+		checkResult(httpResp, HttpStatus.SC_OK);
+		return JSONUtils.fromJSON(s, AnalysisRequestSummary.class);
+	}
+
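+	/**
+	 * Fetches the annotations produced by the given analysis request via GET /annotations?analysisRequestId=... .
+	 */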
+	public Annotations getAnnotations(String analysisRequestId) throws Exception {
+		Executor exec = RestTestBase.getRestClientManager().getAuthenticatedExecutor();
+		URIBuilder uri = new URIBuilder(RestTestBase.getBaseURI() + "/annotations").addParameter("analysisRequestId", analysisRequestId);
+		Request req = Request.Get(uri.build());
+		Response resp = exec.execute(req);
+		HttpResponse httpResp = resp.returnResponse();
+		InputStream is = httpResp.getEntity().getContent();
+
+		String s = Utils.getInputStreamAsString(is, "UTF-8");
+		logger.info("Settings read request returned: " + s);
+		is.close();
+		checkResult(httpResp, HttpStatus.SC_OK);
+		return JSONUtils.fromJSON(s, Annotations.class);
+	}
+}
diff --git a/odf/odf-web/webpack.config.js b/odf/odf-web/webpack.config.js
new file mode 100755
index 0000000..380f705
--- /dev/null
+++ b/odf/odf-web/webpack.config.js
@@ -0,0 +1,65 @@
+/**
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+var path = require('path');
+
+const APP_ROOT="./src/main/webapp";
+const MAIN_FILE= path.resolve(APP_ROOT + "/scripts/odf-console.js");
+const CLIENT_FILE= path.resolve(APP_ROOT + "/scripts/odf-client.js");
+
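+// Build two separate bundles: the full ODF console ("odf-web") and the client-only UI ("odf-client").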
+module.exports = {
+	entry: {
+		"odf-web": MAIN_FILE,
+		"odf-client": CLIENT_FILE
+	},
+
+    output: {
+        filename: "/[name].js",
+        path: path.resolve(APP_ROOT)
+    },
+
+    module: {
+	    loaders: [
+	      {
+	        test: /\.jsx?$/,
+	        loader: 'babel',
+	        query: {
+	            presets: ['react', 'es2015']
+	        },
+	        include: /(webapp)/,
+	        exclude: /(odf-web.js)/
+	      },
+	      {
+	    	  test: /\.(jsx|js)$/,
+	    	  loader: 'imports?jQuery=jquery,$=jquery,this=>window'
+	      },
+	      {
+	          test: /\.css$/,
+	          loader: 'style!css'
+	      },
+	      {
+	          test: /\.(png|jpg)$/,
+	          loader: 'url?limit=25000&name=resources/img/[hash].[ext]'
+	      },
+	      {
+	    	  test: /\.woff(2)?(\?v=[0-9]\.[0-9]\.[0-9])?$/,
+        	  loader: 'url-loader?limit=25000&mimetype=application/font-woff&name=resources/fonts/[hash].[ext]'
+          },
+          {
+        	  test: /\.(ttf|eot|svg)(\?v=[0-9]\.[0-9]\.[0-9])?$/,
+	          loader: 'url?limit=25000&name=resources/fonts/[hash].[ext]'
+          }
+	    ]
+    }
+}
diff --git a/odf/pom.xml b/odf/pom.xml
new file mode 100755
index 0000000..2e1f263
--- /dev/null
+++ b/odf/pom.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<artifactId>odf</artifactId>
+	<name>odf</name>
+	<groupId>org.apache.atlas.odf</groupId>
+	<version>1.2.0-SNAPSHOT</version>
+	<packaging>pom</packaging>
+
+
+	<modules>
+		<module>odf-api</module>
+		<module>odf-core</module>
+		<module>odf-store</module>
+		<module>odf-messaging</module>
+	</modules>
+
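+	<!-- The atlas and test-env profiles are opt-in; complete-build is active by default
+	     and is skipped by setting -Dreduced-build=true. -->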
+	<profiles>
+		<profile>
+			<id>atlas</id>
+			<modules>
+				<module>odf-atlas</module>
+			</modules>
+		</profile>
+		<profile>
+			<id>complete-build</id>
+			<activation>
+				<property>
+					<name>reduced-build</name>
+					<value>!true</value>
+				</property>
+			</activation>
+			<modules>
+				<module>odf-spark-example-application</module>
+				<module>odf-spark</module>
+				<module>odf-doc</module>
+				<module>odf-web</module>
+				<module>odf-archetype-discoveryservice</module>
+			</modules>
+		</profile>
+		<profile>
+			<id>test-env</id>
+			<modules>
+				<module>odf-test-env</module>
+			</modules>
+		</profile>
+	</profiles>
+
+	<properties>
+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+		<testZookeepeConnectionString>localhost:2181</testZookeepeConnectionString>
+		<odf.test.logdir>/tmp</odf.test.logdir>
+		<odf.unittest.logspec>ALL,${odf.test.logdir}/${project.name}-unit-trace.log</odf.unittest.logspec>
+		<odf.integrationtest.logspec>ALL,${odf.test.logdir}/${project.name}-integration-trace.log</odf.integrationtest.logspec>
+		<jackson.version>2.6.5</jackson.version>
+		<jetty.maven.plugin.port>58080</jetty.maven.plugin.port>
+		<odf.test.base.url>https://localhost:${jetty.maven.plugin.port}</odf.test.base.url>
+		<odf.test.webapp.url>https://localhost:${jetty.maven.plugin.port}/odf-web-1.2.0-SNAPSHOT</odf.test.webapp.url>
+		<odf.test.user>sdp</odf.test.user>
+		<odf.test.password>ZzTeX3hKtVORgks+2TaLPWxerucPBoxK</odf.test.password>
+		<atlas.version>0.7-incubating-release</atlas.version>
+		<atlas.url>https://localhost:21443</atlas.url>
+		<atlas.user>admin</atlas.user>
+		<atlas.password>UR0+HOiApXG9B8SNpKN5ww==</atlas.password>
+	</properties>
+
+	<build>
+		<plugins>
+			<!-- make sure we are compiling for Java 1.7 -->
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-compiler-plugin</artifactId>
+				<version>2.3.2</version>
+				<configuration>
+					<source>1.7</source>
+					<target>1.7</target>
+				</configuration>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-antrun-plugin</artifactId>
+				<version>1.8</version>
+				<executions>
+					<execution>
+						<inherited>false</inherited>
+						<phase>test</phase>
+						<goals>
+							<goal>run</goal>
+						</goals>
+						<configuration>
+							<tasks>
+								<delete>
+									<fileset dir="/tmp/" includes="odf-test-execution-log.csv"/>
+								</delete>
+							</tasks>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-dependency-plugin</artifactId>
+				<version>2.10</version>
+				<executions>
+					<execution>
+						<id>list-dependencies</id>
+						<phase>validate</phase>
+						<goals>
+							<goal>tree</goal>
+						</goals>
+						<configuration>
+						</configuration>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+</project>
diff --git a/odf/prepare_embedded_jetty.xml b/odf/prepare_embedded_jetty.xml
new file mode 100755
index 0000000..c9aa044
--- /dev/null
+++ b/odf/prepare_embedded_jetty.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+~
+~ Licensed under the Apache License, Version 2.0 (the "License");
+~ you may not use this file except in compliance with the License.
+~ You may obtain a copy of the License at
+~
+~   http://www.apache.org/licenses/LICENSE-2.0
+~
+~ Unless required by applicable law or agreed to in writing, software
+~ distributed under the License is distributed on an "AS IS" BASIS,
+~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~ See the License for the specific language governing permissions and
+~ limitations under the License.
+-->
+
+<project name="prepare_embedded_jetty">
+
+	<dirname property="script.basedir" file="${ant.file.prepare_embedded_jetty}" />
+	<property name="source-dir" value="${script.basedir}/jettyconfig" />
+	<property name="download-dir" value="${script.basedir}/target/downloads/jettyconfig" />
+	<property name="target-dir" value="${script.basedir}/target/jettyconfig" />
+
+	<condition property="is-windows">
+		<os family="windows">
+		</os>
+	</condition>
+
+	<condition property="is-unix">
+		<os family="unix">
+		</os>
+	</condition>
+
+	<condition property="is-mac">
+		<os family="mac">
+		</os>
+	</condition>
+
+	<condition property="config-available">
+	   <available file="${target-dir}"/>
+    </condition>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="create-directories">
+		<mkdir dir="${download-dir}"/>
+		<mkdir dir="${target-dir}"/>
+	</target>
+
+	<target name="copy-config-files">
+		<copy todir="${target-dir}">
+			<fileset dir="${source-dir}" />
+			<fileset dir="${download-dir}" />
+		</copy>
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="download-keystore-file-windows" if="is-windows">
+		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${download-dir}/keystore.jks" />
+		<echo message="Downloaded IBM JDK keystore because we are on Windows." />
+	</target>
+
+	<target name="download-keystore-file-mac" if="is-mac">
+		<get verbose="true" src="https://ibm.box.com/shared/static/odnmhqua5sdue03z43vqsv0lp509ov70.jks" dest="${download-dir}/keystore.jks" />
+		<echo message="Downloaded OpenJDK keystore because we are on Mac." />
+	</target>
+
+	<target name="download-keystore-file-unix" if="is-unix">
+		<get verbose="true" src="https://ibm.box.com/shared/static/k0qgh31ynbgnjsrbg5s97hsqbssh6pd4.jks" dest="${download-dir}/keystore.jks" />
+		<echo message="Downloaded IBM JDK keystore because we are on UNIX (Other than Mac)." />
+	</target>
+
+	<target name="download-keystore-file" depends="download-keystore-file-unix,download-keystore-file-windows,download-keystore-file-mac">
+		<!-- keystore.jks file is stored in Box@IBM - Re-generate the file using the Java keytool -->
+		<!-- command: keytool -genkey -alias myatlas -keyalg RSA -keystore /tmp/atlas-security/keystore.jks -keysize 2048 -->
+		<!-- Note that ibm jdk uses different format than oracle/open jdk, therefore a separate version has to be generated for each jdk -->
+	</target>
+
+	<!-- ****************************************************************************************** -->
+
+	<target name="prepare-jetty-config" unless="config-available">
+		<echo message="Preparing jetty configuration..." />
+		<antcall target="create-directories" />
+		<antcall target="download-keystore-file"/>
+		<antcall target="copy-config-files"/>
+		<echo message="Jetty configuration completed." />
+	</target>
+
+</project>
diff --git a/pom.xml b/pom.xml
index b7f5ea0..3893b8f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2029,6 +2029,11 @@
                         <exclude>**/policy-store.txt</exclude>
                         <exclude>**/*rebel*.xml</exclude>
                         <exclude>**/*rebel*.xml.bak</exclude>
+
+                        <!-- execute following files in ODF directory -->
+                        <exclude>**/.gitignore</exclude>
+                        <exclude>odf/**/*.csv</exclude>
+                        <exclude>odf/**/*.txt</exclude>
                     </excludes>
                 </configuration>
                 <executions>